{ "cells": [ { "cell_type": "markdown", "metadata": { "lc_cell_meme": { "current": "a8ead072-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8ead1da-495a-11e8-ba2b-0242ac130002", "previous": null } }, "source": [ "# About: O07_02_Hadoop_Restore a \"Slave Node\"\n", "\n", "---\n", "\n", "In this notebook, **restore a Slave Node.**\n", "\n" ] }, { "cell_type": "markdown", "metadata": { "lc_cell_meme": { "current": "a8ead1da-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8ead310-495a-11e8-ba2b-0242ac130002", "previous": "a8ead072-495a-11e8-ba2b-0242ac130002" } }, "source": [ "## *Operation Note*\n", "\n", "*This is a cell for your own recording. ここに経緯を記述*" ] }, { "cell_type": "markdown", "metadata": { "lc_cell_meme": { "current": "a8ead310-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8ead446-495a-11e8-ba2b-0242ac130002", "previous": "a8ead1da-495a-11e8-ba2b-0242ac130002" } }, "source": [ "# Confirm Inventory\n", "\n", "Review the structure definition - `hosts.csv`:" ] }, { "cell_type": "code", "execution_count": 1, "metadata": { "collapsed": true, "lc_cell_meme": { "current": "a8ead446-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8ead568-495a-11e8-ba2b-0242ac130002", "previous": "a8ead310-495a-11e8-ba2b-0242ac130002" } }, "outputs": [], "source": [ "target_group = 'hadoop_all_cluster1'" ] }, { "cell_type": "code", "execution_count": 2, "metadata": { "ExecuteTime": { "end_time": "2016-05-17T06:48:38.828745", "start_time": "2016-05-17T06:48:38.820535" }, "collapsed": true, "lc_cell_meme": { "current": "a8ead568-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8ead68a-495a-11e8-ba2b-0242ac130002", "previous": "a8ead446-495a-11e8-ba2b-0242ac130002" } }, "outputs": [], "source": [ "target_hostname = 'sn02031601'" ] }, { "cell_type": "code", "execution_count": 3, "metadata": { "ExecuteTime": { "end_time": "2016-05-17T06:48:55.781362", "start_time": "2016-05-17T06:48:55.685595" }, "lc_cell_meme": { "current": "a8ead68a-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8ead7ac-495a-11e8-ba2b-0242ac130002", "previous": "a8ead568-495a-11e8-ba2b-0242ac130002" }, "scrolled": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Cluster(Cluster1):\n" ] }, { "data": { "text/html": [ "
\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
ClusterTypeNameInternal IPService IPVCPUsMemory(MiB)DFS VolumesYARN VCPUsYARN Total Memory(MB)...HBase RegionServerTezHivePigClientSparkSpark HistoryServerKDC MasterKDC SlaveDocker
0Cluster1cn0107cn01070401XXX.XXX.XXX.85XXX.XXX.XXX.1971298304NaNNaNNaN...FalseFalseFalseFalseFalseFalseFalseTrueFalseFalse
1Cluster1cn0107cn01070402XXX.XXX.XXX.86XXX.XXX.XXX.1981298304NaNNaNNaN...FalseFalseFalseFalseFalseFalseFalseFalseTrueFalse
2Cluster1cn0107cn01070403XXX.XXX.XXX.87XXX.XXX.XXX.1991298304NaNNaNNaN...FalseFalseFalseFalseFalseFalseFalseFalseFalseFalse
3Cluster1cn0107cn01070404XXX.XXX.XXX.88XXX.XXX.XXX.2001298304NaNNaNNaN...FalseTrueTrueTrueTrueTrueTrueFalseFalseFalse
4Cluster1cn0107cn01070603XXX.XXX.XXX.83XXX.XXX.XXX.1951298304NaNNaNNaN...FalseFalseFalseFalseFalseFalseFalseFalseFalseTrue
5Cluster1cn0107cn01070604XXX.XXX.XXX.84XXX.XXX.XXX.1961298304NaNNaNNaN...FalseFalseFalseFalseFalseFalseFalseFalseFalseTrue
6Cluster1sn0202sn02020401XXX.XXX.XXX.12XXX.XXX.XXX.230166553610.015.064512.0...TrueFalseFalseFalseFalseFalseFalseFalseFalseFalse
7Cluster1sn0202sn02021201XXX.XXX.XXX.8XXX.XXX.XXX.228166553610.015.064512.0...TrueFalseFalseFalseFalseFalseFalseFalseFalseFalse
8Cluster1sn0202sn02022001XXX.XXX.XXX.4XXX.XXX.XXX.226166553610.015.064512.0...TrueFalseFalseFalseFalseFalseFalseFalseFalseFalse
9Cluster1sn0202sn02022401XXX.XXX.XXX.2XXX.XXX.XXX.225166553610.015.064512.0...TrueFalseFalseFalseFalseFalseFalseFalseFalseFalse
10Cluster1sn0203sn02030401XXX.XXX.XXX.24XXX.XXX.XXX.236166553610.015.064512.0...TrueFalseFalseFalseFalseFalseFalseFalseFalseFalse
11Cluster1sn0203sn02031201XXX.XXX.XXX.20XXX.XXX.XXX.234166553610.015.064512.0...TrueFalseFalseFalseFalseFalseFalseFalseFalseFalse
12Cluster1sn0203sn02032001XXX.XXX.XXX.16XXX.XXX.XXX.232166553610.015.064512.0...TrueFalseFalseFalseFalseFalseFalseFalseFalseFalse
13Cluster1sn0203sn02032401XXX.XXX.XXX.14XXX.XXX.XXX.231166553610.015.064512.0...TrueFalseFalseFalseFalseFalseFalseFalseFalseFalse
14Cluster1sn0203sn02031601XXX.XXX.XXX.18XXX.XXX.XXX.233166553610.015.064512.0...TrueFalseFalseFalseFalseFalseFalseFalseFalseFalse
\n", "

15 rows × 30 columns

\n", "
" ], "text/plain": [ " Cluster Type Name Internal IP Service IP VCPUs \\\n", "0 Cluster1 cn0107 cn01070401 XXX.XXX.XXX.85 XXX.XXX.XXX.197 12 \n", "1 Cluster1 cn0107 cn01070402 XXX.XXX.XXX.86 XXX.XXX.XXX.198 12 \n", "2 Cluster1 cn0107 cn01070403 XXX.XXX.XXX.87 XXX.XXX.XXX.199 12 \n", "3 Cluster1 cn0107 cn01070404 XXX.XXX.XXX.88 XXX.XXX.XXX.200 12 \n", "4 Cluster1 cn0107 cn01070603 XXX.XXX.XXX.83 XXX.XXX.XXX.195 12 \n", "5 Cluster1 cn0107 cn01070604 XXX.XXX.XXX.84 XXX.XXX.XXX.196 12 \n", "6 Cluster1 sn0202 sn02020401 XXX.XXX.XXX.12 XXX.XXX.XXX.230 16 \n", "7 Cluster1 sn0202 sn02021201 XXX.XXX.XXX.8 XXX.XXX.XXX.228 16 \n", "8 Cluster1 sn0202 sn02022001 XXX.XXX.XXX.4 XXX.XXX.XXX.226 16 \n", "9 Cluster1 sn0202 sn02022401 XXX.XXX.XXX.2 XXX.XXX.XXX.225 16 \n", "10 Cluster1 sn0203 sn02030401 XXX.XXX.XXX.24 XXX.XXX.XXX.236 16 \n", "11 Cluster1 sn0203 sn02031201 XXX.XXX.XXX.20 XXX.XXX.XXX.234 16 \n", "12 Cluster1 sn0203 sn02032001 XXX.XXX.XXX.16 XXX.XXX.XXX.232 16 \n", "13 Cluster1 sn0203 sn02032401 XXX.XXX.XXX.14 XXX.XXX.XXX.231 16 \n", "14 Cluster1 sn0203 sn02031601 XXX.XXX.XXX.18 XXX.XXX.XXX.233 16 \n", "\n", " Memory(MiB) DFS Volumes YARN VCPUs YARN Total Memory(MB) ... \\\n", "0 98304 NaN NaN NaN ... \n", "1 98304 NaN NaN NaN ... \n", "2 98304 NaN NaN NaN ... \n", "3 98304 NaN NaN NaN ... \n", "4 98304 NaN NaN NaN ... \n", "5 98304 NaN NaN NaN ... \n", "6 65536 10.0 15.0 64512.0 ... \n", "7 65536 10.0 15.0 64512.0 ... \n", "8 65536 10.0 15.0 64512.0 ... \n", "9 65536 10.0 15.0 64512.0 ... \n", "10 65536 10.0 15.0 64512.0 ... \n", "11 65536 10.0 15.0 64512.0 ... \n", "12 65536 10.0 15.0 64512.0 ... \n", "13 65536 10.0 15.0 64512.0 ... \n", "14 65536 10.0 15.0 64512.0 ... \n", "\n", " HBase RegionServer Tez Hive Pig Client Spark Spark HistoryServer \\\n", "0 False False False False False False False \n", "1 False False False False False False False \n", "2 False False False False False False False \n", "3 False True True True True True True \n", "4 False False False False False False False \n", "5 False False False False False False False \n", "6 True False False False False False False \n", "7 True False False False False False False \n", "8 True False False False False False False \n", "9 True False False False False False False \n", "10 True False False False False False False \n", "11 True False False False False False False \n", "12 True False False False False False False \n", "13 True False False False False False False \n", "14 True False False False False False False \n", "\n", " KDC Master KDC Slave Docker \n", "0 True False False \n", "1 False True False \n", "2 False False False \n", "3 False False False \n", "4 False False True \n", "5 False False True \n", "6 False False False \n", "7 False False False \n", "8 False False False \n", "9 False False False \n", "10 False False False \n", "11 False False False \n", "12 False False False \n", "13 False False False \n", "14 False False False \n", "\n", "[15 rows x 30 columns]" ] }, "execution_count": 3, "metadata": {}, "output_type": "execute_result" } ], "source": [ "%run scripts/loader.py\n", "\n", "TARGET_CLUSTER = 'Cluster1'\n", "\n", "header, machines = read_machines(\"hosts.csv\")\n", "machines = [m for m in machines if m['Cluster'] == TARGET_CLUSTER]\n", "\n", "print(\"Cluster(%s):\" % TARGET_CLUSTER)\n", "pd.DataFrame([get_row(header, m) for m in machines], columns=header)" ] }, { "cell_type": "code", "execution_count": 4, "metadata": { "ExecuteTime": { "end_time": "2016-05-17T06:48:56.443569", "start_time": 
"2016-05-17T06:48:56.437704" }, "collapsed": true, "lc_cell_meme": { "current": "a8ead7ac-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8ead8ce-495a-11e8-ba2b-0242ac130002", "previous": "a8ead68a-495a-11e8-ba2b-0242ac130002" } }, "outputs": [], "source": [ "target_hosts = filter(lambda m: m['Name'] == target_hostname, machines)\n", "assert(target_hosts)" ] }, { "cell_type": "markdown", "metadata": { "collapsed": true, "lc_cell_meme": { "current": "a8ead8ce-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8ead9f0-495a-11e8-ba2b-0242ac130002", "previous": "a8ead7ac-495a-11e8-ba2b-0242ac130002" } }, "source": [ "---\n", "For following sections remember new server's Service NIC address" ] }, { "cell_type": "code", "execution_count": 5, "metadata": { "ExecuteTime": { "end_time": "2016-05-17T06:48:58.048039", "start_time": "2016-05-17T06:48:58.044130" }, "lc_cell_meme": { "current": "a8ead9f0-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eadb08-495a-11e8-ba2b-0242ac130002", "previous": "a8ead8ce-495a-11e8-ba2b-0242ac130002" }, "scrolled": false }, "outputs": [ { "data": { "text/plain": [ "'XXX.XXX.XXX.233'" ] }, "execution_count": 5, "metadata": {}, "output_type": "execute_result" } ], "source": [ "host_new_machine = target_hosts[0]['Service IP']\n", "host_new_machine" ] }, { "cell_type": "markdown", "metadata": { "lc_cell_meme": { "current": "a8eadb08-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eadc20-495a-11e8-ba2b-0242ac130002", "previous": "a8ead9f0-495a-11e8-ba2b-0242ac130002" } }, "source": [ "# Initialize and mount new HDD\n", "\n", "## Retrieve utility playbooks\n", "\n", "We would like to use utility playbooks to manage volumes. The playbooks are **PRIVATE** because they include our private information..." ] }, { "cell_type": "code", "execution_count": 6, "metadata": { "ExecuteTime": { "end_time": "2016-05-17T06:49:06.259237", "start_time": "2016-05-17T06:49:04.904226" }, "lc_cell_meme": { "current": "a8eadc20-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eadd42-495a-11e8-ba2b-0242ac130002", "previous": "a8eadb08-495a-11e8-ba2b-0242ac130002" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Cloning into '/tmp/tmpwDTh_U/original'...\n", "remote: Counting objects: 399, done.\u001b[K\n", "remote: Compressing objects: 100% (251/251), done.\u001b[K\n", "remote: Total 399 (delta 138), reused 196 (delta 55)\u001b[K\n", "Receiving objects: 100% (399/399), 37.95 KiB | 0 bytes/s, done.\n", "Resolving deltas: 100% (138/138), done.\n", "Checking connectivity... 
done.\n", "total 12\n", "drwx------ 3 root root 4096 Sep 2 18:56 .\n", "drwxrwxrwt 65 root root 4096 Sep 2 18:56 ..\n", "drwxr-xr-x 7 root root 4096 Sep 2 18:56 original\n" ] } ], "source": [ "import tempfile\n", "prereq_path = tempfile.mkdtemp()\n", "!mkdir -p {prereq_path}/original\n", "!git clone ssh://xxx.nii.ac.jp/xxx/aic-dataanalysis-prerequisite.git {prereq_path}/original\n", "!ls -la {prereq_path}" ] }, { "cell_type": "code", "execution_count": 7, "metadata": { "ExecuteTime": { "end_time": "2016-05-17T06:49:15.570988", "start_time": "2016-05-17T06:49:15.222170" }, "lc_cell_meme": { "current": "a8eadd42-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eade64-495a-11e8-ba2b-0242ac130002", "previous": "a8eadc20-495a-11e8-ba2b-0242ac130002" }, "scrolled": true }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "[sn02031601]\r\n", "XXX.XXX.XXX.233\r\n", "\r\n", "[sn0203:children]\r\n", "sn02031601\r\n", "\r\n", "[Cluster1:children]\r\n", "sn02031601\r\n", "\r\n", "[ganglia_masters:children]\r\n", "\r\n", "[kerberos_servers:children]\r\n", "kdc_masters\r\n", "kdc_slaves\r\n", "\r\n", "[kdc_masters:children]\r\n", "\r\n", "[kdc_masters:vars]\r\n", "kdc_role=master\r\n", "\r\n", "[kdc_slaves:children]\r\n", "\r\n", "[kdc_slaves:vars]\r\n", "kdc_role=slave\r\n", "\r\n", "[kerberos_clients:children]\r\n", "\r\n" ] } ], "source": [ "!mkdir -p {prereq_path}/current\n", "%run common/inventory-base.py\n", "import os\n", "\n", "with open(os.path.join(prereq_path, 'current', 'hosts'), 'w') as f:\n", " write_base_inventory(target_hosts, f)\n", "!cat {prereq_path}/current/hosts" ] }, { "cell_type": "code", "execution_count": 8, "metadata": { "ExecuteTime": { "end_time": "2016-05-17T06:49:31.700753", "start_time": "2016-05-17T06:49:31.639942" }, "lc_cell_meme": { "current": "a8eade64-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eadf86-495a-11e8-ba2b-0242ac130002", "previous": "a8eadd42-495a-11e8-ba2b-0242ac130002" } }, "outputs": [ { "data": { "text/plain": [ "{'bonding_nic_1': '',\n", " 'bonding_nic_2': '',\n", " 'hdd_devices_and_mountpoints': [{'device': '/dev/sdc',\n", " 'mount': '/hadoop/tmp'},\n", " {'device': '/dev/sdd', 'mount': '/hadoop/data01'},\n", " {'device': '/dev/sde', 'mount': '/hadoop/data02'},\n", " {'device': '/dev/sdf', 'mount': '/hadoop/data03'},\n", " {'device': '/dev/sdg', 'mount': '/hadoop/data04'},\n", " {'device': '/dev/sdh', 'mount': '/hadoop/data05'},\n", " {'device': '/dev/sdi', 'mount': '/hadoop/data06'},\n", " {'device': '/dev/sdj', 'mount': '/hadoop/data07'},\n", " {'device': '/dev/sdk', 'mount': '/hadoop/data08'},\n", " {'device': '/dev/sdl', 'mount': '/hadoop/data09'},\n", " {'device': '/dev/sdm', 'mount': '/hadoop/data10'}],\n", " 'log_device': '/dev/sdb',\n", " 'server_nic_type': 'sn0203',\n", " 'server_type': 'sn'}" ] }, "execution_count": 8, "metadata": {}, "output_type": "execute_result" } ], "source": [ "import yaml\n", "with open(os.path.join(prereq_path, 'original', 'group_vars', target_hosts[0]['Type']), 'r') as f:\n", " group_vars = yaml.load(f)\n", "group_vars" ] }, { "cell_type": "markdown", "metadata": { "lc_cell_meme": { "current": "a8eadf86-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eae09e-495a-11e8-ba2b-0242ac130002", "previous": "a8eade64-495a-11e8-ba2b-0242ac130002" } }, "source": [ "Test the inventory..." 
] }, { "cell_type": "code", "execution_count": 9, "metadata": { "ExecuteTime": { "end_time": "2016-05-17T06:50:10.105188", "start_time": "2016-05-17T06:50:08.714753" }, "lc_cell_meme": { "current": "a8eae09e-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eae1b6-495a-11e8-ba2b-0242ac130002", "previous": "a8eadf86-495a-11e8-ba2b-0242ac130002" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\u001b[0;32mXXX.XXX.XXX.233 | SUCCESS => {\r\n", " \"changed\": false, \r\n", " \"ping\": \"pong\"\r\n", "}\u001b[0m\r\n" ] } ], "source": [ "!ansible -m ping -i {prereq_path}/current/hosts all" ] }, { "cell_type": "markdown", "metadata": { "lc_cell_meme": { "current": "a8eae1b6-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eae2ce-495a-11e8-ba2b-0242ac130002", "previous": "a8eae09e-495a-11e8-ba2b-0242ac130002" } }, "source": [ "## Add Logical Disk to use the HDD" ] }, { "cell_type": "code", "execution_count": 10, "metadata": { "ExecuteTime": { "end_time": "2016-05-17T06:50:15.385476", "start_time": "2016-05-17T06:50:14.083321" }, "lc_cell_meme": { "current": "a8eae2ce-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eae3e6-495a-11e8-ba2b-0242ac130002", "previous": "a8eae1b6-495a-11e8-ba2b-0242ac130002" }, "scrolled": true }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\u001b[0;32mXXX.XXX.XXX.233 | SUCCESS | rc=0 >>\r\n", "\r", " \r\n", "Adapter #0\r\n", "\r\n", "Enclosure Device ID: 32\r\n", "Slot Number: 0\r\n", "Drive's position: DiskGroup: 1, Span: 0, Arm: 0\r\n", "Enclosure position: 1\r\n", "Device Id: 0\r\n", "WWN: 50014EE55556CA41\r\n", "Sequence Number: 2\r\n", "Media Error Count: 0\r\n", "Other Error Count: 0\r\n", "Predictive Failure Count: 0\r\n", "Last Predictive Failure Event Seq Number: 0\r\n", "PD Type: SAS\r\n", "\r\n", "Raw Size: 2.728 TB [0x15d50a3b0 Sectors]\r\n", "Non Coerced Size: 2.728 TB [0x15d40a3b0 Sectors]\r\n", "Coerced Size: 2.728 TB [0x15d400000 Sectors]\r\n", "Sector Size: 0\r\n", "Firmware state: Online, Spun Up\r\n", "Device Firmware Level: D1R2\r\n", "Shield Counter: 0\r\n", "Successful diagnostics completion on : N/A\r\n", "SAS Address(0): 0x50014ee55556ca42\r\n", "SAS Address(1): 0x0\r\n", "Connected Port Number: 0(path0) \r\n", "Inquiry Data: WD WD3001FYYG D1R2WCC1F0082655 \r\n", "FDE Capable: Not Capable\r\n", "FDE Enable: Disable\r\n", "Secured: Unsecured\r\n", "Locked: Unlocked\r\n", "Needs EKM Attention: No\r\n", "Foreign State: None \r\n", "Device Speed: 6.0Gb/s \r\n", "Link Speed: 6.0Gb/s \r\n", "Media Type: Hard Disk Device\r\n", "Drive Temperature :30C (86.00 F)\r\n", "PI Eligibility: No \r\n", "Drive is formatted for PI information: No\r\n", "PI: No PI\r\n", "Port-0 :\r\n", "Port status: Active\r\n", "Port's Linkspeed: 6.0Gb/s \r\n", "Port-1 :\r\n", "Port status: Active\r\n", "Port's Linkspeed: Unknown \r\n", "Drive has flagged a S.M.A.R.T alert : No\r\n", "\r\n", "\r\n", "\r\n", "Enclosure Device ID: 32\r\n", "Slot Number: 1\r\n", "Drive's position: DiskGroup: 2, Span: 0, Arm: 0\r\n", "Enclosure position: 1\r\n", "Device Id: 1\r\n", "WWN: 50014EE5AAAC5E2D\r\n", "Sequence Number: 2\r\n", "Media Error Count: 0\r\n", "Other Error Count: 0\r\n", "Predictive Failure Count: 0\r\n", "Last Predictive Failure Event Seq Number: 0\r\n", "PD Type: SAS\r\n", "\r\n", "Raw Size: 2.728 TB [0x15d50a3b0 Sectors]\r\n", "Non Coerced Size: 2.728 TB [0x15d40a3b0 Sectors]\r\n", "Coerced Size: 2.728 TB [0x15d400000 Sectors]\r\n", "Sector Size: 0\r\n", "Firmware state: Online, Spun Up\r\n", "Device Firmware 
Level: D1R2\r\n", "Shield Counter: 0\r\n", "Successful diagnostics completion on : N/A\r\n", "SAS Address(0): 0x50014ee5aaac5e2e\r\n", "SAS Address(1): 0x0\r\n", "Connected Port Number: 0(path0) \r\n", "Inquiry Data: WD WD3001FYYG D1R2WCC1F0068736 \r\n", "FDE Capable: Not Capable\r\n", "FDE Enable: Disable\r\n", "Secured: Unsecured\r\n", "Locked: Unlocked\r\n", "Needs EKM Attention: No\r\n", "Foreign State: None \r\n", "Device Speed: 6.0Gb/s \r\n", "Link Speed: 6.0Gb/s \r\n", "Media Type: Hard Disk Device\r\n", "Drive Temperature :31C (87.80 F)\r\n", "PI Eligibility: No \r\n", "Drive is formatted for PI information: No\r\n", "PI: No PI\r\n", "Port-0 :\r\n", "Port status: Active\r\n", "Port's Linkspeed: 6.0Gb/s \r\n", "Port-1 :\r\n", "Port status: Active\r\n", "Port's Linkspeed: Unknown \r\n", "Drive has flagged a S.M.A.R.T alert : No\r\n", "\r\n", "\r\n", "\r\n", "Enclosure Device ID: 32\r\n", "Slot Number: 2\r\n", "Drive's position: DiskGroup: 3, Span: 0, Arm: 0\r\n", "Enclosure position: 1\r\n", "Device Id: 2\r\n", "WWN: 50014EE500020F85\r\n", "Sequence Number: 2\r\n", "Media Error Count: 0\r\n", "Other Error Count: 0\r\n", "Predictive Failure Count: 0\r\n", "Last Predictive Failure Event Seq Number: 0\r\n", "PD Type: SAS\r\n", "\r\n", "Raw Size: 2.728 TB [0x15d50a3b0 Sectors]\r\n", "Non Coerced Size: 2.728 TB [0x15d40a3b0 Sectors]\r\n", "Coerced Size: 2.728 TB [0x15d400000 Sectors]\r\n", "Sector Size: 0\r\n", "Firmware state: Online, Spun Up\r\n", "Device Firmware Level: D1R2\r\n", "Shield Counter: 0\r\n", "Successful diagnostics completion on : N/A\r\n", "SAS Address(0): 0x50014ee500020f86\r\n", "SAS Address(1): 0x0\r\n", "Connected Port Number: 0(path0) \r\n", "Inquiry Data: WD WD3001FYYG D1R2WCC1F0110913 \r\n", "FDE Capable: Not Capable\r\n", "FDE Enable: Disable\r\n", "Secured: Unsecured\r\n", "Locked: Unlocked\r\n", "Needs EKM Attention: No\r\n", "Foreign State: None \r\n", "Device Speed: 6.0Gb/s \r\n", "Link Speed: 6.0Gb/s \r\n", "Media Type: Hard Disk Device\r\n", "Drive Temperature :30C (86.00 F)\r\n", "PI Eligibility: No \r\n", "Drive is formatted for PI information: No\r\n", "PI: No PI\r\n", "Port-0 :\r\n", "Port status: Active\r\n", "Port's Linkspeed: 6.0Gb/s \r\n", "Port-1 :\r\n", "Port status: Active\r\n", "Port's Linkspeed: Unknown \r\n", "Drive has flagged a S.M.A.R.T alert : No\r\n", "\r\n", "\r\n", "\r\n", "Enclosure Device ID: 32\r\n", "Slot Number: 3\r\n", "Drive's position: DiskGroup: 4, Span: 0, Arm: 0\r\n", "Enclosure position: 1\r\n", "Device Id: 3\r\n", "WWN: 50014EE55556F5F1\r\n", "Sequence Number: 2\r\n", "Media Error Count: 0\r\n", "Other Error Count: 0\r\n", "Predictive Failure Count: 0\r\n", "Last Predictive Failure Event Seq Number: 0\r\n", "PD Type: SAS\r\n", "\r\n", "Raw Size: 2.728 TB [0x15d50a3b0 Sectors]\r\n", "Non Coerced Size: 2.728 TB [0x15d40a3b0 Sectors]\r\n", "Coerced Size: 2.728 TB [0x15d400000 Sectors]\r\n", "Sector Size: 0\r\n", "Firmware state: Online, Spun Up\r\n", "Device Firmware Level: D1R2\r\n", "Shield Counter: 0\r\n", "Successful diagnostics completion on : N/A\r\n", "SAS Address(0): 0x50014ee55556f5f2\r\n", "SAS Address(1): 0x0\r\n", "Connected Port Number: 0(path0) \r\n", "Inquiry Data: WD WD3001FYYG D1R2WCC1F0027341 \r\n", "FDE Capable: Not Capable\r\n", "FDE Enable: Disable\r\n", "Secured: Unsecured\r\n", "Locked: Unlocked\r\n", "Needs EKM Attention: No\r\n", "Foreign State: None \r\n", "Device Speed: 6.0Gb/s \r\n", "Link Speed: 6.0Gb/s \r\n", "Media Type: Hard Disk Device\r\n", "Drive Temperature :29C (84.20 F)\r\n", "PI 
Eligibility: No \r\n", "Drive is formatted for PI information: No\r\n", "PI: No PI\r\n", "Port-0 :\r\n", "Port status: Active\r\n", "Port's Linkspeed: 6.0Gb/s \r\n", "Port-1 :\r\n", "Port status: Active\r\n", "Port's Linkspeed: Unknown \r\n", "Drive has flagged a S.M.A.R.T alert : No\r\n", "\r\n", "\r\n", "\r\n", "Enclosure Device ID: 32\r\n", "Slot Number: 4\r\n", "Drive's position: DiskGroup: 11, Span: 0, Arm: 0\r\n", "Enclosure position: 1\r\n", "Device Id: 4\r\n", "WWN: 5000C50084AF7558\r\n", "Sequence Number: 8\r\n", "Media Error Count: 0\r\n", "Other Error Count: 0\r\n", "Predictive Failure Count: 0\r\n", "Last Predictive Failure Event Seq Number: 0\r\n", "PD Type: SAS\r\n", "\r\n", "Raw Size: 2.728 TB [0x15d50a3b0 Sectors]\r\n", "Non Coerced Size: 2.728 TB [0x15d40a3b0 Sectors]\r\n", "Coerced Size: 2.728 TB [0x15d400000 Sectors]\r\n", "Sector Size: 0\r\n", "Firmware state: Online, Spun Up\r\n", "Device Firmware Level: GS10\r\n", "Shield Counter: 0\r\n", "Successful diagnostics completion on : N/A\r\n", "SAS Address(0): 0x5000c50084af7559\r\n", "SAS Address(1): 0x0\r\n", "Connected Port Number: 0(path0) \r\n", "Inquiry Data: SEAGATE ST3000NM0023 GS10Z1Y3Z7MA \r\n", "FDE Capable: Not Capable\r\n", "FDE Enable: Disable\r\n", "Secured: Unsecured\r\n", "Locked: Unlocked\r\n", "Needs EKM Attention: No\r\n", "Foreign State: None \r\n", "Device Speed: 6.0Gb/s \r\n", "Link Speed: 6.0Gb/s \r\n", "Media Type: Hard Disk Device\r\n", "Drive Temperature :31C (87.80 F)\r\n", "PI Eligibility: No \r\n", "Drive is formatted for PI information: No\r\n", "PI: No PI\r\n", "Port-0 :\r\n", "Port status: Active\r\n", "Port's Linkspeed: 6.0Gb/s \r\n", "Port-1 :\r\n", "Port status: Active\r\n", "Port's Linkspeed: Unknown \r\n", "Drive has flagged a S.M.A.R.T alert : No\r\n", "\r\n", "\r\n", "\r\n", "Enclosure Device ID: 32\r\n", "Slot Number: 5\r\n", "Drive's position: DiskGroup: 5, Span: 0, Arm: 0\r\n", "Enclosure position: 1\r\n", "Device Id: 5\r\n", "WWN: 50014EE5555768A9\r\n", "Sequence Number: 2\r\n", "Media Error Count: 0\r\n", "Other Error Count: 0\r\n", "Predictive Failure Count: 0\r\n", "Last Predictive Failure Event Seq Number: 0\r\n", "PD Type: SAS\r\n", "\r\n", "Raw Size: 2.728 TB [0x15d50a3b0 Sectors]\r\n", "Non Coerced Size: 2.728 TB [0x15d40a3b0 Sectors]\r\n", "Coerced Size: 2.728 TB [0x15d400000 Sectors]\r\n", "Sector Size: 0\r\n", "Firmware state: Online, Spun Up\r\n", "Device Firmware Level: D1R2\r\n", "Shield Counter: 0\r\n", "Successful diagnostics completion on : N/A\r\n", "SAS Address(0): 0x50014ee5555768aa\r\n", "SAS Address(1): 0x0\r\n", "Connected Port Number: 0(path0) \r\n", "Inquiry Data: WD WD3001FYYG D1R2WCC1F0027451 \r\n", "FDE Capable: Not Capable\r\n", "FDE Enable: Disable\r\n", "Secured: Unsecured\r\n", "Locked: Unlocked\r\n", "Needs EKM Attention: No\r\n", "Foreign State: None \r\n", "Device Speed: 6.0Gb/s \r\n", "Link Speed: 6.0Gb/s \r\n", "Media Type: Hard Disk Device\r\n", "Drive Temperature :30C (86.00 F)\r\n", "PI Eligibility: No \r\n", "Drive is formatted for PI information: No\r\n", "PI: No PI\r\n", "Port-0 :\r\n", "Port status: Active\r\n", "Port's Linkspeed: 6.0Gb/s \r\n", "Port-1 :\r\n", "Port status: Active\r\n", "Port's Linkspeed: Unknown \r\n", "Drive has flagged a S.M.A.R.T alert : No\r\n", "\r\n", "\r\n", "\r\n", "Enclosure Device ID: 32\r\n", "Slot Number: 6\r\n", "Enclosure position: 1\r\n", "Device Id: 6\r\n", "WWN: 500003972838062C\r\n", "Sequence Number: 7\r\n", "Media Error Count: 0\r\n", "Other Error Count: 0\r\n", "Predictive Failure Count: 0\r\n", 
"Last Predictive Failure Event Seq Number: 0\r\n", "PD Type: SAS\r\n", "\r\n", "Raw Size: 3.638 TB [0x1d1c0beb0 Sectors]\r\n", "Non Coerced Size: 3.637 TB [0x1d1b0beb0 Sectors]\r\n", "Coerced Size: 3.637 TB [0x1d1b00000 Sectors]\r\n", "Sector Size: 0\r\n", "Firmware state: Unconfigured(good), Spun Up\r\n", "Device Firmware Level: DS06\r\n", "Shield Counter: 0\r\n", "Successful diagnostics completion on : N/A\r\n", "SAS Address(0): 0x500003972838062e\r\n", "SAS Address(1): 0x0\r\n", "Connected Port Number: 0(path0) \r\n", "Inquiry Data: TOSHIBA MG04SCA40EN DS0676L0A075FVNC \r\n", "FDE Capable: Not Capable\r\n", "FDE Enable: Disable\r\n", "Secured: Unsecured\r\n", "Locked: Unlocked\r\n", "Needs EKM Attention: No\r\n", "Foreign State: None \r\n", "Device Speed: Unknown \r\n", "Link Speed: 6.0Gb/s \r\n", "Media Type: Hard Disk Device\r\n", "Drive Temperature :27C (80.60 F)\r\n", "PI Eligibility: No \r\n", "Drive is formatted for PI information: No\r\n", "PI: No PI\r\n", "Port-0 :\r\n", "Port status: Active\r\n", "Port's Linkspeed: 6.0Gb/s \r\n", "Port-1 :\r\n", "Port status: Active\r\n", "Port's Linkspeed: Unknown \r\n", "Drive has flagged a S.M.A.R.T alert : No\r\n", "\r\n", "\r\n", "\r\n", "Enclosure Device ID: 32\r\n", "Slot Number: 7\r\n", "Drive's position: DiskGroup: 6, Span: 0, Arm: 0\r\n", "Enclosure position: 1\r\n", "Device Id: 7\r\n", "WWN: 50014EE55557359D\r\n", "Sequence Number: 2\r\n", "Media Error Count: 0\r\n", "Other Error Count: 0\r\n", "Predictive Failure Count: 0\r\n", "Last Predictive Failure Event Seq Number: 0\r\n", "PD Type: SAS\r\n", "\r\n", "Raw Size: 2.728 TB [0x15d50a3b0 Sectors]\r\n", "Non Coerced Size: 2.728 TB [0x15d40a3b0 Sectors]\r\n", "Coerced Size: 2.728 TB [0x15d400000 Sectors]\r\n", "Sector Size: 0\r\n", "Firmware state: Online, Spun Up\r\n", "Device Firmware Level: D1R2\r\n", "Shield Counter: 0\r\n", "Successful diagnostics completion on : N/A\r\n", "SAS Address(0): 0x50014ee55557359e\r\n", "SAS Address(1): 0x0\r\n", "Connected Port Number: 0(path0) \r\n", "Inquiry Data: WD WD3001FYYG D1R2WCC1F0070219 \r\n", "FDE Capable: Not Capable\r\n", "FDE Enable: Disable\r\n", "Secured: Unsecured\r\n", "Locked: Unlocked\r\n", "Needs EKM Attention: No\r\n", "Foreign State: None \r\n", "Device Speed: 6.0Gb/s \r\n", "Link Speed: 6.0Gb/s \r\n", "Media Type: Hard Disk Device\r\n", "Drive Temperature :30C (86.00 F)\r\n", "PI Eligibility: No \r\n", "Drive is formatted for PI information: No\r\n", "PI: No PI\r\n", "Port-0 :\r\n", "Port status: Active\r\n", "Port's Linkspeed: 6.0Gb/s \r\n", "Port-1 :\r\n", "Port status: Active\r\n", "Port's Linkspeed: Unknown \r\n", "Drive has flagged a S.M.A.R.T alert : No\r\n", "\r\n", "\r\n", "\r\n", "Enclosure Device ID: 32\r\n", "Slot Number: 8\r\n", "Drive's position: DiskGroup: 7, Span: 0, Arm: 0\r\n", "Enclosure position: 1\r\n", "Device Id: 8\r\n", "WWN: 50014EE5AAAC2C09\r\n", "Sequence Number: 2\r\n", "Media Error Count: 0\r\n", "Other Error Count: 0\r\n", "Predictive Failure Count: 0\r\n", "Last Predictive Failure Event Seq Number: 0\r\n", "PD Type: SAS\r\n", "\r\n", "Raw Size: 2.728 TB [0x15d50a3b0 Sectors]\r\n", "Non Coerced Size: 2.728 TB [0x15d40a3b0 Sectors]\r\n", "Coerced Size: 2.728 TB [0x15d400000 Sectors]\r\n", "Sector Size: 0\r\n", "Firmware state: Online, Spun Up\r\n", "Device Firmware Level: D1R2\r\n", "Shield Counter: 0\r\n", "Successful diagnostics completion on : N/A\r\n", "SAS Address(0): 0x50014ee5aaac2c0a\r\n", "SAS Address(1): 0x0\r\n", "Connected Port Number: 0(path0) \r\n", "Inquiry Data: WD WD3001FYYG 
D1R2WCC1F0088217 \r\n", "FDE Capable: Not Capable\r\n", "FDE Enable: Disable\r\n", "Secured: Unsecured\r\n", "Locked: Unlocked\r\n", "Needs EKM Attention: No\r\n", "Foreign State: None \r\n", "Device Speed: 6.0Gb/s \r\n", "Link Speed: 6.0Gb/s \r\n", "Media Type: Hard Disk Device\r\n", "Drive Temperature :31C (87.80 F)\r\n", "PI Eligibility: No \r\n", "Drive is formatted for PI information: No\r\n", "PI: No PI\r\n", "Port-0 :\r\n", "Port status: Active\r\n", "Port's Linkspeed: 6.0Gb/s \r\n", "Port-1 :\r\n", "Port status: Active\r\n", "Port's Linkspeed: Unknown \r\n", "Drive has flagged a S.M.A.R.T alert : No\r\n", "\r\n", "\r\n", "\r\n", "Enclosure Device ID: 32\r\n", "Slot Number: 9\r\n", "Drive's position: DiskGroup: 10, Span: 0, Arm: 0\r\n", "Enclosure position: 1\r\n", "Device Id: 9\r\n", "WWN: 5000C50084A9E584\r\n", "Sequence Number: 12\r\n", "Media Error Count: 0\r\n", "Other Error Count: 0\r\n", "Predictive Failure Count: 0\r\n", "Last Predictive Failure Event Seq Number: 0\r\n", "PD Type: SAS\r\n", "\r\n", "Raw Size: 2.728 TB [0x15d50a3b0 Sectors]\r\n", "Non Coerced Size: 2.728 TB [0x15d40a3b0 Sectors]\r\n", "Coerced Size: 2.728 TB [0x15d400000 Sectors]\r\n", "Sector Size: 0\r\n", "Firmware state: Online, Spun Up\r\n", "Device Firmware Level: GS10\r\n", "Shield Counter: 0\r\n", "Successful diagnostics completion on : N/A\r\n", "SAS Address(0): 0x5000c50084a9e585\r\n", "SAS Address(1): 0x0\r\n", "Connected Port Number: 0(path0) \r\n", "Inquiry Data: SEAGATE ST3000NM0023 GS10Z1Y3X7J5 \r\n", "FDE Capable: Not Capable\r\n", "FDE Enable: Disable\r\n", "Secured: Unsecured\r\n", "Locked: Unlocked\r\n", "Needs EKM Attention: No\r\n", "Foreign State: None \r\n", "Device Speed: 6.0Gb/s \r\n", "Link Speed: 6.0Gb/s \r\n", "Media Type: Hard Disk Device\r\n", "Drive Temperature :31C (87.80 F)\r\n", "PI Eligibility: No \r\n", "Drive is formatted for PI information: No\r\n", "PI: No PI\r\n", "Port-0 :\r\n", "Port status: Active\r\n", "Port's Linkspeed: 6.0Gb/s \r\n", "Port-1 :\r\n", "Port status: Active\r\n", "Port's Linkspeed: Unknown \r\n", "Drive has flagged a S.M.A.R.T alert : No\r\n", "\r\n", "\r\n", "\r\n", "Enclosure Device ID: 32\r\n", "Slot Number: 10\r\n", "Drive's position: DiskGroup: 8, Span: 0, Arm: 0\r\n", "Enclosure position: 1\r\n", "Device Id: 10\r\n", "WWN: 50014EE5AAAB7151\r\n", "Sequence Number: 2\r\n", "Media Error Count: 0\r\n", "Other Error Count: 0\r\n", "Predictive Failure Count: 0\r\n", "Last Predictive Failure Event Seq Number: 0\r\n", "PD Type: SAS\r\n", "\r\n", "Raw Size: 2.728 TB [0x15d50a3b0 Sectors]\r\n", "Non Coerced Size: 2.728 TB [0x15d40a3b0 Sectors]\r\n", "Coerced Size: 2.728 TB [0x15d400000 Sectors]\r\n", "Sector Size: 0\r\n", "Firmware state: Online, Spun Up\r\n", "Device Firmware Level: D1R2\r\n", "Shield Counter: 0\r\n", "Successful diagnostics completion on : N/A\r\n", "SAS Address(0): 0x50014ee5aaab7152\r\n", "SAS Address(1): 0x0\r\n", "Connected Port Number: 0(path0) \r\n", "Inquiry Data: WD WD3001FYYG D1R2WCC1F0021668 \r\n", "FDE Capable: Not Capable\r\n", "FDE Enable: Disable\r\n", "Secured: Unsecured\r\n", "Locked: Unlocked\r\n", "Needs EKM Attention: No\r\n", "Foreign State: None \r\n", "Device Speed: 6.0Gb/s \r\n", "Link Speed: 6.0Gb/s \r\n", "Media Type: Hard Disk Device\r\n", "Drive Temperature :32C (89.60 F)\r\n", "PI Eligibility: No \r\n", "Drive is formatted for PI information: No\r\n", "PI: No PI\r\n", "Port-0 :\r\n", "Port status: Active\r\n", "Port's Linkspeed: 6.0Gb/s \r\n", "Port-1 :\r\n", "Port status: Active\r\n", "Port's Linkspeed: 
Unknown \r\n", "Drive has flagged a S.M.A.R.T alert : No\r\n", "\r\n", "\r\n", "\r\n", "Enclosure Device ID: 32\r\n", "Slot Number: 11\r\n", "Drive's position: DiskGroup: 9, Span: 0, Arm: 0\r\n", "Enclosure position: 1\r\n", "Device Id: 11\r\n", "WWN: 50014EE5AAAC2B81\r\n", "Sequence Number: 2\r\n", "Media Error Count: 0\r\n", "Other Error Count: 0\r\n", "Predictive Failure Count: 0\r\n", "Last Predictive Failure Event Seq Number: 0\r\n", "PD Type: SAS\r\n", "\r\n", "Raw Size: 2.728 TB [0x15d50a3b0 Sectors]\r\n", "Non Coerced Size: 2.728 TB [0x15d40a3b0 Sectors]\r\n", "Coerced Size: 2.728 TB [0x15d400000 Sectors]\r\n", "Sector Size: 0\r\n", "Firmware state: Online, Spun Up\r\n", "Device Firmware Level: D1R2\r\n", "Shield Counter: 0\r\n", "Successful diagnostics completion on : N/A\r\n", "SAS Address(0): 0x50014ee5aaac2b82\r\n", "SAS Address(1): 0x0\r\n", "Connected Port Number: 0(path0) \r\n", "Inquiry Data: WD WD3001FYYG D1R2WCC1F0095409 \r\n", "FDE Capable: Not Capable\r\n", "FDE Enable: Disable\r\n", "Secured: Unsecured\r\n", "Locked: Unlocked\r\n", "Needs EKM Attention: No\r\n", "Foreign State: None \r\n", "Device Speed: 6.0Gb/s \r\n", "Link Speed: 6.0Gb/s \r\n", "Media Type: Hard Disk Device\r\n", "Drive Temperature :31C (87.80 F)\r\n", "PI Eligibility: No \r\n", "Drive is formatted for PI information: No\r\n", "PI: No PI\r\n", "Port-0 :\r\n", "Port status: Active\r\n", "Port's Linkspeed: 6.0Gb/s \r\n", "Port-1 :\r\n", "Port status: Active\r\n", "Port's Linkspeed: Unknown \r\n", "Drive has flagged a S.M.A.R.T alert : No\r\n", "\r\n", "\r\n", "\r\n", "Enclosure Device ID: 32\r\n", "Slot Number: 12\r\n", "Drive's position: DiskGroup: 0, Span: 0, Arm: 0\r\n", "Enclosure position: 1\r\n", "Device Id: 12\r\n", "WWN: 5000C5005EFECCD4\r\n", "Sequence Number: 2\r\n", "Media Error Count: 0\r\n", "Other Error Count: 0\r\n", "Predictive Failure Count: 0\r\n", "Last Predictive Failure Event Seq Number: 0\r\n", "PD Type: SAS\r\n", "\r\n", "Raw Size: 136.732 GB [0x11177328 Sectors]\r\n", "Non Coerced Size: 136.232 GB [0x11077328 Sectors]\r\n", "Coerced Size: 136.125 GB [0x11040000 Sectors]\r\n", "Sector Size: 0\r\n", "Firmware state: Online, Spun Up\r\n", "Device Firmware Level: YS09\r\n", "Shield Counter: 0\r\n", "Successful diagnostics completion on : N/A\r\n", "SAS Address(0): 0x5000c5005efeccd5\r\n", "SAS Address(1): 0x0\r\n", "Connected Port Number: 0(path0) \r\n", "Inquiry Data: SEAGATE ST9146853SS YS096XM22WTQ \r\n", "FDE Capable: Not Capable\r\n", "FDE Enable: Disable\r\n", "Secured: Unsecured\r\n", "Locked: Unlocked\r\n", "Needs EKM Attention: No\r\n", "Foreign State: None \r\n", "Device Speed: 6.0Gb/s \r\n", "Link Speed: 6.0Gb/s \r\n", "Media Type: Hard Disk Device\r\n", "Drive Temperature :48C (118.40 F)\r\n", "PI Eligibility: No \r\n", "Drive is formatted for PI information: No\r\n", "PI: No PI\r\n", "Port-0 :\r\n", "Port status: Active\r\n", "Port's Linkspeed: 6.0Gb/s \r\n", "Port-1 :\r\n", "Port status: Active\r\n", "Port's Linkspeed: Unknown \r\n", "Drive has flagged a S.M.A.R.T alert : No\r\n", "\r\n", "\r\n", "\r\n", "Enclosure Device ID: 32\r\n", "Slot Number: 13\r\n", "Drive's position: DiskGroup: 0, Span: 0, Arm: 1\r\n", "Enclosure position: 1\r\n", "Device Id: 13\r\n", "WWN: 5000C5005EFEFE84\r\n", "Sequence Number: 2\r\n", "Media Error Count: 0\r\n", "Other Error Count: 0\r\n", "Predictive Failure Count: 0\r\n", "Last Predictive Failure Event Seq Number: 0\r\n", "PD Type: SAS\r\n", "\r\n", "Raw Size: 136.732 GB [0x11177328 Sectors]\r\n", "Non Coerced Size: 136.232 GB 
[0x11077328 Sectors]\r\n", "Coerced Size: 136.125 GB [0x11040000 Sectors]\r\n", "Sector Size: 0\r\n", "Firmware state: Online, Spun Up\r\n", "Device Firmware Level: YS09\r\n", "Shield Counter: 0\r\n", "Successful diagnostics completion on : N/A\r\n", "SAS Address(0): 0x5000c5005efefe85\r\n", "SAS Address(1): 0x0\r\n", "Connected Port Number: 0(path0) \r\n", "Inquiry Data: SEAGATE ST9146853SS YS096XM22X09 \r\n", "FDE Capable: Not Capable\r\n", "FDE Enable: Disable\r\n", "Secured: Unsecured\r\n", "Locked: Unlocked\r\n", "Needs EKM Attention: No\r\n", "Foreign State: None \r\n", "Device Speed: 6.0Gb/s \r\n", "Link Speed: 6.0Gb/s \r\n", "Media Type: Hard Disk Device\r\n", "Drive Temperature :47C (116.60 F)\r\n", "PI Eligibility: No \r\n", "Drive is formatted for PI information: No\r\n", "PI: No PI\r\n", "Port-0 :\r\n", "Port status: Active\r\n", "Port's Linkspeed: 6.0Gb/s \r\n", "Port-1 :\r\n", "Port status: Active\r\n", "Port's Linkspeed: Unknown \r\n", "Drive has flagged a S.M.A.R.T alert : No\r\n", "\r\n", "\r\n", "\r\n", "\r\n", "Exit Code: 0x00\r\n", "\u001b[0m\r\n" ] } ], "source": [ "!ansible -b -a '/opt/MegaRAID/MegaCli/MegaCli64 -PDList -a0' -i {prereq_path}/current/hosts all" ] }, { "cell_type": "markdown", "metadata": { "lc_cell_meme": { "current": "a8eae3e6-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eae4fe-495a-11e8-ba2b-0242ac130002", "previous": "a8eae2ce-495a-11e8-ba2b-0242ac130002" } }, "source": [ "The `Enclosure Device ID:Slot Number` for the new disk is... **32:6**" ] }, { "cell_type": "code", "execution_count": 11, "metadata": { "ExecuteTime": { "end_time": "2016-05-17T06:51:24.423746", "start_time": "2016-05-17T06:51:24.414384" }, "collapsed": true, "lc_cell_meme": { "current": "a8eae4fe-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eae620-495a-11e8-ba2b-0242ac130002", "previous": "a8eae3e6-495a-11e8-ba2b-0242ac130002" } }, "outputs": [], "source": [ "new_hdd_location = (32, 6)" ] }, { "cell_type": "markdown", "metadata": { "lc_cell_meme": { "current": "a8eae620-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eae738-495a-11e8-ba2b-0242ac130002", "previous": "a8eae4fe-495a-11e8-ba2b-0242ac130002" } }, "source": [ "Checking all Logical Drives..." 
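, "\n", "\n", "The next command dumps `-LDInfo -Lall` for every Virtual Drive; the gap in the VD numbering is what we are looking for. A minimal sketch (hypothetical helper, not part of the original run) for spotting the gap programmatically, assuming the command output is captured as text:\n", "\n", "```python\n", "import re\n", "\n", "def missing_vd_numbers(ldinfo_text, expected_count=13):\n", "    # These nodes carry 13 LDs (0-12): the OS RAID1, the log disk,\n", "    # /hadoop/tmp and ten data disks (see group_vars above).\n", "    present = set(int(n) for n in re.findall(r'Virtual Drive: (\\d+)', ldinfo_text))\n", "    return sorted(set(range(expected_count)) - present)\n", "```"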
] }, { "cell_type": "code", "execution_count": 12, "metadata": { "ExecuteTime": { "end_time": "2016-05-17T06:51:27.286109", "start_time": "2016-05-17T06:51:25.844970" }, "lc_cell_meme": { "current": "a8eae738-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eae850-495a-11e8-ba2b-0242ac130002", "previous": "a8eae620-495a-11e8-ba2b-0242ac130002" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\u001b[0;32mXXX.XXX.XXX.233 | SUCCESS | rc=0 >>\r\n", "\r", " \r\n", "\r\n", "Adapter 0 -- Virtual Drive Information:\r\n", "Virtual Drive: 0 (Target Id: 0)\r\n", "Name :\r\n", "RAID Level : Primary-1, Secondary-0, RAID Level Qualifier-0\r\n", "Size : 136.125 GB\r\n", "Sector Size : 512\r\n", "Mirror Data : 136.125 GB\r\n", "State : Optimal\r\n", "Strip Size : 64 KB\r\n", "Number Of Drives : 2\r\n", "Span Depth : 1\r\n", "Default Cache Policy: WriteBack, ReadAdaptive, Direct, No Write Cache if Bad BBU\r\n", "Current Cache Policy: WriteThrough, ReadAheadNone, Direct, No Write Cache if Bad BBU\r\n", "Default Access Policy: Read/Write\r\n", "Current Access Policy: Read/Write\r\n", "Disk Cache Policy : Disk's Default\r\n", "Encryption Type : None\r\n", "Default Power Savings Policy: Controller Defined\r\n", "Current Power Savings Policy: None\r\n", "Can spin up in 1 minute: Yes\r\n", "LD has drives that support T10 power conditions: Yes\r\n", "LD's IO profile supports MAX power savings with cached writes: No\r\n", "Bad Blocks Exist: No\r\n", "Is VD Cached: Yes\r\n", "Cache Cade Type : Read Only\r\n", "\r\n", "\r\n", "Virtual Drive: 1 (Target Id: 1)\r\n", "Name :\r\n", "RAID Level : Primary-0, Secondary-0, RAID Level Qualifier-0\r\n", "Size : 2.728 TB\r\n", "Sector Size : 512\r\n", "Parity Size : 0\r\n", "State : Optimal\r\n", "Strip Size : 64 KB\r\n", "Number Of Drives : 1\r\n", "Span Depth : 1\r\n", "Default Cache Policy: WriteBack, ReadAdaptive, Direct, No Write Cache if Bad BBU\r\n", "Current Cache Policy: WriteThrough, ReadAheadNone, Direct, No Write Cache if Bad BBU\r\n", "Default Access Policy: Read/Write\r\n", "Current Access Policy: Read/Write\r\n", "Disk Cache Policy : Disk's Default\r\n", "Encryption Type : None\r\n", "Default Power Savings Policy: Controller Defined\r\n", "Current Power Savings Policy: None\r\n", "Can spin up in 1 minute: Yes\r\n", "LD has drives that support T10 power conditions: Yes\r\n", "LD's IO profile supports MAX power savings with cached writes: No\r\n", "Bad Blocks Exist: No\r\n", "Is VD Cached: Yes\r\n", "Cache Cade Type : Read Only\r\n", "\r\n", "\r\n", "Virtual Drive: 2 (Target Id: 2)\r\n", "Name :\r\n", "RAID Level : Primary-0, Secondary-0, RAID Level Qualifier-0\r\n", "Size : 2.728 TB\r\n", "Sector Size : 512\r\n", "Parity Size : 0\r\n", "State : Optimal\r\n", "Strip Size : 64 KB\r\n", "Number Of Drives : 1\r\n", "Span Depth : 1\r\n", "Default Cache Policy: WriteBack, ReadAdaptive, Direct, No Write Cache if Bad BBU\r\n", "Current Cache Policy: WriteThrough, ReadAheadNone, Direct, No Write Cache if Bad BBU\r\n", "Default Access Policy: Read/Write\r\n", "Current Access Policy: Read/Write\r\n", "Disk Cache Policy : Disk's Default\r\n", "Encryption Type : None\r\n", "Default Power Savings Policy: Controller Defined\r\n", "Current Power Savings Policy: None\r\n", "Can spin up in 1 minute: Yes\r\n", "LD has drives that support T10 power conditions: Yes\r\n", "LD's IO profile supports MAX power savings with cached writes: No\r\n", "Bad Blocks Exist: No\r\n", "Is VD Cached: Yes\r\n", "Cache Cade Type : Read Only\r\n", "\r\n", "\r\n", 
"Virtual Drive: 3 (Target Id: 3)\r\n", "Name :\r\n", "RAID Level : Primary-0, Secondary-0, RAID Level Qualifier-0\r\n", "Size : 2.728 TB\r\n", "Sector Size : 512\r\n", "Parity Size : 0\r\n", "State : Optimal\r\n", "Strip Size : 64 KB\r\n", "Number Of Drives : 1\r\n", "Span Depth : 1\r\n", "Default Cache Policy: WriteBack, ReadAdaptive, Direct, No Write Cache if Bad BBU\r\n", "Current Cache Policy: WriteThrough, ReadAheadNone, Direct, No Write Cache if Bad BBU\r\n", "Default Access Policy: Read/Write\r\n", "Current Access Policy: Read/Write\r\n", "Disk Cache Policy : Disk's Default\r\n", "Encryption Type : None\r\n", "Default Power Savings Policy: Controller Defined\r\n", "Current Power Savings Policy: None\r\n", "Can spin up in 1 minute: Yes\r\n", "LD has drives that support T10 power conditions: Yes\r\n", "LD's IO profile supports MAX power savings with cached writes: No\r\n", "Bad Blocks Exist: No\r\n", "Is VD Cached: Yes\r\n", "Cache Cade Type : Read Only\r\n", "\r\n", "\r\n", "Virtual Drive: 4 (Target Id: 4)\r\n", "Name :\r\n", "RAID Level : Primary-0, Secondary-0, RAID Level Qualifier-0\r\n", "Size : 2.728 TB\r\n", "Sector Size : 512\r\n", "Parity Size : 0\r\n", "State : Optimal\r\n", "Strip Size : 64 KB\r\n", "Number Of Drives : 1\r\n", "Span Depth : 1\r\n", "Default Cache Policy: WriteBack, ReadAdaptive, Direct, No Write Cache if Bad BBU\r\n", "Current Cache Policy: WriteThrough, ReadAheadNone, Direct, No Write Cache if Bad BBU\r\n", "Default Access Policy: Read/Write\r\n", "Current Access Policy: Read/Write\r\n", "Disk Cache Policy : Disk's Default\r\n", "Encryption Type : None\r\n", "Default Power Savings Policy: Controller Defined\r\n", "Current Power Savings Policy: None\r\n", "Can spin up in 1 minute: Yes\r\n", "LD has drives that support T10 power conditions: Yes\r\n", "LD's IO profile supports MAX power savings with cached writes: No\r\n", "Bad Blocks Exist: No\r\n", "Is VD Cached: Yes\r\n", "Cache Cade Type : Read Only\r\n", "\r\n", "\r\n", "Virtual Drive: 5 (Target Id: 5)\r\n", "Name :\r\n", "RAID Level : Primary-0, Secondary-0, RAID Level Qualifier-0\r\n", "Size : 2.728 TB\r\n", "Sector Size : 512\r\n", "Parity Size : 0\r\n", "State : Optimal\r\n", "Strip Size : 64 KB\r\n", "Number Of Drives : 1\r\n", "Span Depth : 1\r\n", "Default Cache Policy: WriteBack, ReadAdaptive, Direct, No Write Cache if Bad BBU\r\n", "Current Cache Policy: WriteThrough, ReadAheadNone, Direct, No Write Cache if Bad BBU\r\n", "Default Access Policy: Read/Write\r\n", "Current Access Policy: Read/Write\r\n", "Disk Cache Policy : Disk's Default\r\n", "Encryption Type : None\r\n", "Default Power Savings Policy: Controller Defined\r\n", "Current Power Savings Policy: None\r\n", "Can spin up in 1 minute: Yes\r\n", "LD has drives that support T10 power conditions: Yes\r\n", "LD's IO profile supports MAX power savings with cached writes: No\r\n", "Bad Blocks Exist: No\r\n", "Is VD Cached: Yes\r\n", "Cache Cade Type : Read Only\r\n", "\r\n", "\r\n", "Virtual Drive: 6 (Target Id: 6)\r\n", "Name :\r\n", "RAID Level : Primary-0, Secondary-0, RAID Level Qualifier-0\r\n", "Size : 2.728 TB\r\n", "Sector Size : 512\r\n", "Parity Size : 0\r\n", "State : Optimal\r\n", "Strip Size : 64 KB\r\n", "Number Of Drives : 1\r\n", "Span Depth : 1\r\n", "Default Cache Policy: WriteBack, ReadAdaptive, Direct, No Write Cache if Bad BBU\r\n", "Current Cache Policy: WriteThrough, ReadAheadNone, Direct, No Write Cache if Bad BBU\r\n", "Default Access Policy: Read/Write\r\n", "Current Access Policy: Read/Write\r\n", "Disk Cache 
Policy : Disk's Default\r\n", "Encryption Type : None\r\n", "Default Power Savings Policy: Controller Defined\r\n", "Current Power Savings Policy: None\r\n", "Can spin up in 1 minute: Yes\r\n", "LD has drives that support T10 power conditions: Yes\r\n", "LD's IO profile supports MAX power savings with cached writes: No\r\n", "Bad Blocks Exist: No\r\n", "Is VD Cached: Yes\r\n", "Cache Cade Type : Read Only\r\n", "\r\n", "\r\n", "Virtual Drive: 8 (Target Id: 8)\r\n", "Name :\r\n", "RAID Level : Primary-0, Secondary-0, RAID Level Qualifier-0\r\n", "Size : 2.728 TB\r\n", "Sector Size : 512\r\n", "Parity Size : 0\r\n", "State : Optimal\r\n", "Strip Size : 64 KB\r\n", "Number Of Drives : 1\r\n", "Span Depth : 1\r\n", "Default Cache Policy: WriteBack, ReadAdaptive, Direct, No Write Cache if Bad BBU\r\n", "Current Cache Policy: WriteThrough, ReadAheadNone, Direct, No Write Cache if Bad BBU\r\n", "Default Access Policy: Read/Write\r\n", "Current Access Policy: Read/Write\r\n", "Disk Cache Policy : Disk's Default\r\n", "Encryption Type : None\r\n", "Default Power Savings Policy: Controller Defined\r\n", "Current Power Savings Policy: None\r\n", "Can spin up in 1 minute: Yes\r\n", "LD has drives that support T10 power conditions: Yes\r\n", "LD's IO profile supports MAX power savings with cached writes: No\r\n", "Bad Blocks Exist: No\r\n", "Is VD Cached: Yes\r\n", "Cache Cade Type : Read Only\r\n", "\r\n", "\r\n", "Virtual Drive: 9 (Target Id: 9)\r\n", "Name :\r\n", "RAID Level : Primary-0, Secondary-0, RAID Level Qualifier-0\r\n", "Size : 2.728 TB\r\n", "Sector Size : 512\r\n", "Parity Size : 0\r\n", "State : Optimal\r\n", "Strip Size : 64 KB\r\n", "Number Of Drives : 1\r\n", "Span Depth : 1\r\n", "Default Cache Policy: WriteBack, ReadAdaptive, Direct, No Write Cache if Bad BBU\r\n", "Current Cache Policy: WriteThrough, ReadAheadNone, Direct, No Write Cache if Bad BBU\r\n", "Default Access Policy: Read/Write\r\n", "Current Access Policy: Read/Write\r\n", "Disk Cache Policy : Disk's Default\r\n", "Encryption Type : None\r\n", "Default Power Savings Policy: Controller Defined\r\n", "Current Power Savings Policy: None\r\n", "Can spin up in 1 minute: Yes\r\n", "LD has drives that support T10 power conditions: Yes\r\n", "LD's IO profile supports MAX power savings with cached writes: No\r\n", "Bad Blocks Exist: No\r\n", "Is VD Cached: Yes\r\n", "Cache Cade Type : Read Only\r\n", "\r\n", "\r\n", "Virtual Drive: 10 (Target Id: 10)\r\n", "Name :\r\n", "RAID Level : Primary-0, Secondary-0, RAID Level Qualifier-0\r\n", "Size : 2.728 TB\r\n", "Sector Size : 512\r\n", "Parity Size : 0\r\n", "State : Optimal\r\n", "Strip Size : 64 KB\r\n", "Number Of Drives : 1\r\n", "Span Depth : 1\r\n", "Default Cache Policy: WriteBack, ReadAdaptive, Direct, No Write Cache if Bad BBU\r\n", "Current Cache Policy: WriteThrough, ReadAheadNone, Direct, No Write Cache if Bad BBU\r\n", "Default Access Policy: Read/Write\r\n", "Current Access Policy: Read/Write\r\n", "Disk Cache Policy : Disk's Default\r\n", "Encryption Type : None\r\n", "Default Power Savings Policy: Controller Defined\r\n", "Current Power Savings Policy: None\r\n", "Can spin up in 1 minute: Yes\r\n", "LD has drives that support T10 power conditions: Yes\r\n", "LD's IO profile supports MAX power savings with cached writes: No\r\n", "Bad Blocks Exist: No\r\n", "Is VD Cached: Yes\r\n", "Cache Cade Type : Read Only\r\n", "\r\n", "\r\n", "Virtual Drive: 11 (Target Id: 11)\r\n", "Name :\r\n", "RAID Level : Primary-0, Secondary-0, RAID Level Qualifier-0\r\n", "Size : 2.728 
TB\r\n", "Sector Size : 512\r\n", "Parity Size : 0\r\n", "State : Optimal\r\n", "Strip Size : 64 KB\r\n", "Number Of Drives : 1\r\n", "Span Depth : 1\r\n", "Default Cache Policy: WriteBack, ReadAdaptive, Direct, No Write Cache if Bad BBU\r\n", "Current Cache Policy: WriteThrough, ReadAheadNone, Direct, No Write Cache if Bad BBU\r\n", "Default Access Policy: Read/Write\r\n", "Current Access Policy: Read/Write\r\n", "Disk Cache Policy : Disk's Default\r\n", "Encryption Type : None\r\n", "Default Power Savings Policy: Controller Defined\r\n", "Current Power Savings Policy: None\r\n", "Can spin up in 1 minute: Yes\r\n", "LD has drives that support T10 power conditions: Yes\r\n", "LD's IO profile supports MAX power savings with cached writes: No\r\n", "Bad Blocks Exist: No\r\n", "Is VD Cached: Yes\r\n", "Cache Cade Type : Read Only\r\n", "\r\n", "\r\n", "Virtual Drive: 12 (Target Id: 12)\r\n", "Name :\r\n", "RAID Level : Primary-0, Secondary-0, RAID Level Qualifier-0\r\n", "Size : 2.728 TB\r\n", "Sector Size : 512\r\n", "Parity Size : 0\r\n", "State : Optimal\r\n", "Strip Size : 64 KB\r\n", "Number Of Drives : 1\r\n", "Span Depth : 1\r\n", "Default Cache Policy: WriteBack, ReadAdaptive, Direct, No Write Cache if Bad BBU\r\n", "Current Cache Policy: WriteThrough, ReadAheadNone, Direct, No Write Cache if Bad BBU\r\n", "Default Access Policy: Read/Write\r\n", "Current Access Policy: Read/Write\r\n", "Disk Cache Policy : Disk's Default\r\n", "Encryption Type : None\r\n", "Default Power Savings Policy: Controller Defined\r\n", "Current Power Savings Policy: None\r\n", "Can spin up in 1 minute: Yes\r\n", "LD has drives that support T10 power conditions: Yes\r\n", "LD's IO profile supports MAX power savings with cached writes: No\r\n", "Bad Blocks Exist: No\r\n", "Is VD Cached: Yes\r\n", "Cache Cade Type : Read Only\r\n", "\r\n", "\r\n", "\r\n", "Exit Code: 0x00\r\n", "\u001b[0m\r\n" ] } ], "source": [ "!ansible -b -a '/opt/MegaRAID/MegaCli/MegaCli64 -LDInfo -Lall -a0' -i {prereq_path}/current/hosts all" ] }, { "cell_type": "markdown", "metadata": { "lc_cell_meme": { "current": "a8eae850-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eae97c-495a-11e8-ba2b-0242ac130002", "previous": "a8eae738-495a-11e8-ba2b-0242ac130002" } }, "source": [ "Confirmed that **Logical Device 7** is missing.\n", "\n", "Add the new disk as a logical disk" ] }, { "cell_type": "code", "execution_count": 13, "metadata": { "ExecuteTime": { "end_time": "2016-05-17T06:52:11.017010", "start_time": "2016-05-17T06:52:09.695182" }, "lc_cell_meme": { "current": "a8eae97c-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eaea94-495a-11e8-ba2b-0242ac130002", "previous": "a8eae850-495a-11e8-ba2b-0242ac130002" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\u001b[0;31mXXX.XXX.XXX.233 | FAILED | rc=84 >>\r\n", "\r", " \r\n", "\r\n", "Adapter 0: Configure Adapter Failed\r\n", "\r\n", "FW error description: \r\n", " The current operation is not allowed because the controller has data in cache for offline or missing virtual drives. 
\r\n", "\r\n", "Exit Code: 0x54\r\n", "\u001b[0m\r\n" ] } ], "source": [ "!ansible -b -a '/opt/MegaRAID/MegaCli/MegaCli64 -CfgLdAdd -r0 [{new_hdd_location[0]}:{new_hdd_location[1]}] -a0' -i {prereq_path}/current/hosts all" ] }, { "cell_type": "markdown", "metadata": { "lc_cell_meme": { "current": "a8eaea94-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eaebb6-495a-11e8-ba2b-0242ac130002", "previous": "a8eae97c-495a-11e8-ba2b-0242ac130002" } }, "source": [ "Oops, the preserved cache remains... OK, let's remove it.\n", "\n", "Set the old LD." ] }, { "cell_type": "code", "execution_count": 14, "metadata": { "collapsed": true, "lc_cell_meme": { "current": "a8eaebb6-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eaecce-495a-11e8-ba2b-0242ac130002", "previous": "a8eaea94-495a-11e8-ba2b-0242ac130002" } }, "outputs": [], "source": [ "previous_ld_number = 7" ] }, { "cell_type": "code", "execution_count": 15, "metadata": { "lc_cell_meme": { "current": "a8eaecce-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eaede6-495a-11e8-ba2b-0242ac130002", "previous": "a8eaebb6-495a-11e8-ba2b-0242ac130002" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\u001b[0;32mXXX.XXX.XXX.233 | SUCCESS | rc=0 >>\r\n", "\r", " \r\n", "Adapter #0\r\n", "\r\n", "Virtual Drive(Target ID 07): Preserved Cache Data Cleared.\r\n", "\r\n", "Exit Code: 0x00\r\n", "\u001b[0m\r\n" ] } ], "source": [ "!ansible -b -a '/opt/MegaRAID/MegaCli/MegaCli64 -DiscardPreservedCache -L{previous_ld_number} -a0' -i {prereq_path}/current/hosts all" ] }, { "cell_type": "markdown", "metadata": { "lc_cell_meme": { "current": "a8eaede6-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eaeef4-495a-11e8-ba2b-0242ac130002", "previous": "a8eaecce-495a-11e8-ba2b-0242ac130002" } }, "source": [ "OK, try to create LD..." ] }, { "cell_type": "code", "execution_count": 16, "metadata": { "lc_cell_meme": { "current": "a8eaeef4-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eaf00c-495a-11e8-ba2b-0242ac130002", "previous": "a8eaede6-495a-11e8-ba2b-0242ac130002" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\u001b[0;32mXXX.XXX.XXX.233 | SUCCESS | rc=0 >>\r\n", "\r", " \r\n", "Adapter 0: Created VD 7\r\n", "\r\n", "Adapter 0: Configured the Adapter!!\r\n", "\r\n", "Exit Code: 0x00\r\n", "\u001b[0m\r\n" ] } ], "source": [ "!ansible -b -a '/opt/MegaRAID/MegaCli/MegaCli64 -CfgLdAdd -r0 [{new_hdd_location[0]}:{new_hdd_location[1]}] -a0' -i {prereq_path}/current/hosts all" ] }, { "cell_type": "markdown", "metadata": { "lc_cell_meme": { "current": "a8eaf00c-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eaf124-495a-11e8-ba2b-0242ac130002", "previous": "a8eaeef4-495a-11e8-ba2b-0242ac130002" } }, "source": [ "np...! The number of new VD is **7**" ] }, { "cell_type": "markdown", "metadata": { "lc_cell_meme": { "current": "a8eaf124-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eaf23c-495a-11e8-ba2b-0242ac130002", "previous": "a8eaf00c-495a-11e8-ba2b-0242ac130002" } }, "source": [ "Then, I'm going to **reboot the machine and recreate all volumes**.\n", "Rainy, let's dry run..." 
] }, { "cell_type": "code", "execution_count": 17, "metadata": { "lc_cell_meme": { "current": "a8eaf23c-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eaf354-495a-11e8-ba2b-0242ac130002", "previous": "a8eaf124-495a-11e8-ba2b-0242ac130002" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Using /etc/ansible/ansible.cfg as config file\n", "\u001b[0;36mXXX.XXX.XXX.233 | SKIPPED\u001b[0m\n" ] } ], "source": [ "!ansible -CDv -b -a 'reboot' -i {prereq_path}/current/hosts all" ] }, { "cell_type": "markdown", "metadata": { "lc_cell_meme": { "current": "a8eaf354-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eaf46c-495a-11e8-ba2b-0242ac130002", "previous": "a8eaf23c-495a-11e8-ba2b-0242ac130002" } }, "source": [ "OK, reboot it...!" ] }, { "cell_type": "code", "execution_count": 18, "metadata": { "lc_cell_meme": { "current": "a8eaf46c-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eaf8cc-495a-11e8-ba2b-0242ac130002", "previous": "a8eaf354-495a-11e8-ba2b-0242ac130002" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\u001b[0;32mXXX.XXX.XXX.233 | SUCCESS | rc=0 >>\r\n", "\r\n", "\u001b[0m\r\n" ] } ], "source": [ "!ansible -b -a 'reboot' -i {prereq_path}/current/hosts all" ] }, { "cell_type": "markdown", "metadata": { "lc_cell_meme": { "current": "a8eaf8cc-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eafa0c-495a-11e8-ba2b-0242ac130002", "previous": "a8eaf46c-495a-11e8-ba2b-0242ac130002" } }, "source": [ "Wait for the machine..." ] }, { "cell_type": "code", "execution_count": 32, "metadata": { "lc_cell_meme": { "current": "a8eafa0c-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eafb2e-495a-11e8-ba2b-0242ac130002", "previous": "a8eaf8cc-495a-11e8-ba2b-0242ac130002" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "PING XXX.XXX.XXX.233 (XXX.XXX.XXX.233) 56(84) bytes of data.\n", "64 bytes from XXX.XXX.XXX.233: icmp_seq=1 ttl=63 time=0.365 ms\n", "64 bytes from XXX.XXX.XXX.233: icmp_seq=2 ttl=63 time=0.323 ms\n", "64 bytes from XXX.XXX.XXX.233: icmp_seq=3 ttl=63 time=0.290 ms\n", "64 bytes from XXX.XXX.XXX.233: icmp_seq=4 ttl=63 time=0.350 ms\n", "\n", "--- XXX.XXX.XXX.233 ping statistics ---\n", "4 packets transmitted, 4 received, 0% packet loss, time 2997ms\n", "rtt min/avg/max/mdev = 0.290/0.332/0.365/0.028 ms\n" ] } ], "source": [ "!ping -c 4 {host_new_machine}" ] }, { "cell_type": "code", "execution_count": 33, "metadata": { "lc_cell_meme": { "current": "a8eafb2e-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eafc46-495a-11e8-ba2b-0242ac130002", "previous": "a8eafa0c-495a-11e8-ba2b-0242ac130002" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\u001b[0;32mXXX.XXX.XXX.233 | SUCCESS => {\r\n", " \"changed\": false, \r\n", " \"ping\": \"pong\"\r\n", "}\u001b[0m\r\n" ] } ], "source": [ "!ansible -m ping {host_new_machine}" ] }, { "cell_type": "markdown", "metadata": { "lc_cell_meme": { "current": "a8eafc46-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eafd68-495a-11e8-ba2b-0242ac130002", "previous": "a8eafb2e-495a-11e8-ba2b-0242ac130002" } }, "source": [ "Review mountpoints..." 
] }, { "cell_type": "code", "execution_count": 34, "metadata": { "lc_cell_meme": { "current": "a8eafd68-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eafe80-495a-11e8-ba2b-0242ac130002", "previous": "a8eafc46-495a-11e8-ba2b-0242ac130002" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\u001b[0;32mXXX.XXX.XXX.233 | SUCCESS | rc=0 >>\r\n", "Filesystem Size Used Avail Use% Mounted on\r\n", "/dev/sda2 14G 4.0G 9.3G 30% /\r\n", "tmpfs 34G 0 34G 0% /dev/shm\r\n", "/dev/sda5 726M 180M 509M 27% /mnt\r\n", "/dev/sdb1 3.0T 7.1G 2.8T 1% /var/log\r\n", "/dev/sdc1 3.0T 19G 2.8T 1% /hadoop/tmp\r\n", "/dev/sdd1 3.0T 46G 2.8T 2% /hadoop/data01\r\n", "/dev/sde1 3.0T 51G 2.8T 2% /hadoop/data02\r\n", "/dev/sdk1 3.0T 34G 2.8T 2% /hadoop/data03\r\n", "/dev/sdg1 3.0T 48G 2.8T 2% /hadoop/data04\r\n", "/dev/sdi1 3.0T 48G 2.8T 2% /hadoop/data06\r\n", "/dev/sdj1 3.0T 48G 2.8T 2% /hadoop/data07\r\n", "/dev/sdl1 3.0T 50G 2.8T 2% /hadoop/data09\r\n", "/dev/sdm1 3.0T 50G 2.8T 2% /hadoop/data10\r\n", "\u001b[0m\r\n" ] } ], "source": [ "!ansible -a 'df -H' -i {prereq_path}/current/hosts all" ] }, { "cell_type": "code", "execution_count": 35, "metadata": { "lc_cell_meme": { "current": "a8eafe80-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eaff98-495a-11e8-ba2b-0242ac130002", "previous": "a8eafd68-495a-11e8-ba2b-0242ac130002" } }, "outputs": [ { "data": { "text/plain": [ "{'bonding_nic_1': '',\n", " 'bonding_nic_2': '',\n", " 'hdd_devices_and_mountpoints': [{'device': '/dev/sdc',\n", " 'mount': '/hadoop/tmp'},\n", " {'device': '/dev/sdd', 'mount': '/hadoop/data01'},\n", " {'device': '/dev/sde', 'mount': '/hadoop/data02'},\n", " {'device': '/dev/sdf', 'mount': '/hadoop/data03'},\n", " {'device': '/dev/sdg', 'mount': '/hadoop/data04'},\n", " {'device': '/dev/sdh', 'mount': '/hadoop/data05'},\n", " {'device': '/dev/sdi', 'mount': '/hadoop/data06'},\n", " {'device': '/dev/sdj', 'mount': '/hadoop/data07'},\n", " {'device': '/dev/sdk', 'mount': '/hadoop/data08'},\n", " {'device': '/dev/sdl', 'mount': '/hadoop/data09'},\n", " {'device': '/dev/sdm', 'mount': '/hadoop/data10'}],\n", " 'log_device': '/dev/sdb',\n", " 'server_nic_type': 'sn0203',\n", " 'server_type': 'sn'}" ] }, "execution_count": 35, "metadata": {}, "output_type": "execute_result" } ], "source": [ "import yaml\n", "with open(os.path.join(prereq_path, 'original', 'group_vars', target_hosts[0]['Type']), 'r') as f:\n", " group_vars = yaml.load(f)\n", "group_vars" ] }, { "cell_type": "code", "execution_count": 36, "metadata": { "ExecuteTime": { "end_time": "2016-05-17T06:54:57.895877", "start_time": "2016-05-17T06:54:57.549679" }, "lc_cell_meme": { "current": "a8eaff98-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eb00b0-495a-11e8-ba2b-0242ac130002", "previous": "a8eafe80-495a-11e8-ba2b-0242ac130002" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "bonding_nic_1: ''\r\n", "bonding_nic_2: ''\r\n", "hdd_devices_and_mountpoints:\r\n", "- {device: /dev/sdc, mount: /hadoop/tmp}\r\n", "- {device: /dev/sdd, mount: /hadoop/data01}\r\n", "- {device: /dev/sde, mount: /hadoop/data02}\r\n", "- {device: /dev/sdf, mount: /hadoop/data03}\r\n", "- {device: /dev/sdg, mount: /hadoop/data04}\r\n", "- {device: /dev/sdh, mount: /hadoop/data05}\r\n", "- {device: /dev/sdi, mount: /hadoop/data06}\r\n", "- {device: /dev/sdj, mount: /hadoop/data07}\r\n", "- {device: /dev/sdk, mount: /hadoop/data08}\r\n", "- {device: /dev/sdl, mount: /hadoop/data09}\r\n", "- {device: /dev/sdm, mount: /hadoop/data10}\r\n", 
"server_nic_type: sn0203\r\n", "server_type: sn\r\n" ] } ], "source": [ "!mkdir -p {prereq_path}/current/group_vars\n", "del group_vars['log_device']\n", "with open(os.path.join(prereq_path, 'current', 'group_vars', target_hosts[0]['Type']), 'w') as f:\n", " f.write(yaml.dump(group_vars))\n", "!cat {prereq_path}/current/group_vars/{target_hosts[0]['Type']}" ] }, { "cell_type": "markdown", "metadata": { "lc_cell_meme": { "current": "a8eb00b0-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eb01c8-495a-11e8-ba2b-0242ac130002", "previous": "a8eaff98-495a-11e8-ba2b-0242ac130002" } }, "source": [ "Remove all files on all volumes..." ] }, { "cell_type": "code", "execution_count": 37, "metadata": { "lc_cell_meme": { "current": "a8eb01c8-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eb02e0-495a-11e8-ba2b-0242ac130002", "previous": "a8eb00b0-495a-11e8-ba2b-0242ac130002" }, "scrolled": true }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\u001b[0;32mXXX.XXX.XXX.233 | SUCCESS | rc=0 >>\n", "\n", "\u001b[0m\n", "\u001b[0;32mXXX.XXX.XXX.233 | SUCCESS | rc=0 >>\n", "\n", "\u001b[0m\n", "\u001b[0;32mXXX.XXX.XXX.233 | SUCCESS | rc=0 >>\n", "\n", "\u001b[0m\n", "\u001b[0;32mXXX.XXX.XXX.233 | SUCCESS | rc=0 >>\n", "\n", "\u001b[0m\n", "\u001b[0;32mXXX.XXX.XXX.233 | SUCCESS | rc=0 >>\n", "\n", "\u001b[0m\n", "\u001b[0;32mXXX.XXX.XXX.233 | SUCCESS | rc=0 >>\n", "\n", "\u001b[0m\n", "\u001b[0;32mXXX.XXX.XXX.233 | SUCCESS | rc=0 >>\n", "\n", "\u001b[0m\n", "\u001b[0;32mXXX.XXX.XXX.233 | SUCCESS | rc=0 >>\n", "\n", "\u001b[0m\n", "\u001b[0;32mXXX.XXX.XXX.233 | SUCCESS | rc=0 >>\n", "\n", "\u001b[0m\n", "\u001b[0;32mXXX.XXX.XXX.233 | SUCCESS | rc=0 >>\n", "\n", "\u001b[0m\n", "\u001b[0;32mXXX.XXX.XXX.233 | SUCCESS | rc=0 >>\n", "\n", "\u001b[0m\n" ] } ], "source": [ "for hdd in group_vars['hdd_devices_and_mountpoints']:\n", " if hdd['mount'] == '/hadoop/tmp':\n", " dirs = os.path.join(hdd['mount'], 'hadoop-yarn')\n", " else:\n", " dirs = os.path.join(hdd['mount'], 'dfs')\n", " !ansible -b -a 'rm -fr {dirs}' -i {prereq_path}/current/hosts all" ] }, { "cell_type": "markdown", "metadata": { "lc_cell_meme": { "current": "a8eb02e0-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eb03f8-495a-11e8-ba2b-0242ac130002", "previous": "a8eb01c8-495a-11e8-ba2b-0242ac130002" } }, "source": [ "## Format the HDD" ] }, { "cell_type": "code", "execution_count": 38, "metadata": { "ExecuteTime": { "end_time": "2016-05-17T06:55:02.394232", "start_time": "2016-05-17T06:55:02.222434" }, "collapsed": true, "lc_cell_meme": { "current": "a8eb03f8-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eb0506-495a-11e8-ba2b-0242ac130002", "previous": "a8eb02e0-495a-11e8-ba2b-0242ac130002" } }, "outputs": [], "source": [ "!cp {prereq_path}/original/playbooks/destructive/clean-hdds.yml {prereq_path}/current/" ] }, { "cell_type": "markdown", "metadata": { "lc_cell_meme": { "current": "a8eb0506-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eb0628-495a-11e8-ba2b-0242ac130002", "previous": "a8eb03f8-495a-11e8-ba2b-0242ac130002" } }, "source": [ "First, let's dry run...!" 
] }, { "cell_type": "code", "execution_count": 39, "metadata": { "ExecuteTime": { "end_time": "2016-05-17T06:56:01.798694", "start_time": "2016-05-17T06:55:56.424112" }, "lc_cell_meme": { "current": "a8eb0628-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eb0740-495a-11e8-ba2b-0242ac130002", "previous": "a8eb0506-495a-11e8-ba2b-0242ac130002" }, "scrolled": true }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Using /etc/ansible/ansible.cfg as config file\n", "\u001b[0;35m[DEPRECATION WARNING]: Instead of sudo/sudo_user, use become/become_user and \n", "make sure become_method is 'sudo' (default).\n", "This feature will be removed in a \n", "future release. Deprecation warnings can be disabled by setting \n", "deprecation_warnings=False in ansible.cfg.\u001b[0m\n", "\n", "PLAY [all] *********************************************************************\n", "\n", "TASK [setup] *******************************************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [install gdisk package.] **************************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => {\"changed\": false, \"msg\": \"\", \"rc\": 0, \"results\": [\"gdisk-0.8.10-1.el6.x86_64 providing gdisk is already installed\"]}\u001b[0m\n", "\n", "TASK [unmount disks to be formatted.] ******************************************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233] => {\"changed\": false, \"skip_reason\": \"Conditional check failed\", \"skipped\": true}\u001b[0m\n", "\n", "TASK [zap GPT and MBR data structure of HDD.] **********************************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233] => {\"changed\": false, \"skip_reason\": \"Conditional check failed\", \"skipped\": true}\u001b[0m\n", "\n", "TASK [create partition.] *******************************************************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233] => {\"changed\": false, \"skip_reason\": \"Conditional check failed\", \"skipped\": true}\u001b[0m\n", "\n", "TASK [unmount disks to be formatted.] ******************************************\n", "\u001b[0;35m[DEPRECATION WARNING]: Using bare variables is deprecated. Update your playbooks\n", " so that the environment value uses the full variable syntax \n", "('{{hdd_devices_and_mountpoints}}').\n", "This feature will be removed in a future \n", "release. 
Deprecation warnings can be disabled by setting \n", "deprecation_warnings=False in ansible.cfg.\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdc', u'mount': u'/hadoop/tmp'}) => {\"changed\": true, \"fstab\": \"/etc/fstab\", \"fstype\": \"ext4\", \"item\": {\"device\": \"/dev/sdc\", \"mount\": \"/hadoop/tmp\"}, \"name\": \"/hadoop/tmp\", \"src\": \"/dev/sdc1\"}\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdd', u'mount': u'/hadoop/data01'}) => {\"changed\": true, \"fstab\": \"/etc/fstab\", \"fstype\": \"ext4\", \"item\": {\"device\": \"/dev/sdd\", \"mount\": \"/hadoop/data01\"}, \"name\": \"/hadoop/data01\", \"src\": \"/dev/sdd1\"}\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sde', u'mount': u'/hadoop/data02'}) => {\"changed\": true, \"fstab\": \"/etc/fstab\", \"fstype\": \"ext4\", \"item\": {\"device\": \"/dev/sde\", \"mount\": \"/hadoop/data02\"}, \"name\": \"/hadoop/data02\", \"src\": \"/dev/sde1\"}\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdf', u'mount': u'/hadoop/data03'}) => {\"changed\": true, \"fstab\": \"/etc/fstab\", \"fstype\": \"ext4\", \"item\": {\"device\": \"/dev/sdf\", \"mount\": \"/hadoop/data03\"}, \"name\": \"/hadoop/data03\", \"src\": \"/dev/sdf1\"}\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdg', u'mount': u'/hadoop/data04'}) => {\"changed\": true, \"fstab\": \"/etc/fstab\", \"fstype\": \"ext4\", \"item\": {\"device\": \"/dev/sdg\", \"mount\": \"/hadoop/data04\"}, \"name\": \"/hadoop/data04\", \"src\": \"/dev/sdg1\"}\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdh', u'mount': u'/hadoop/data05'}) => {\"changed\": false, \"fstab\": \"/etc/fstab\", \"fstype\": \"ext4\", \"item\": {\"device\": \"/dev/sdh\", \"mount\": \"/hadoop/data05\"}, \"name\": \"/hadoop/data05\", \"src\": \"/dev/sdh1\"}\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdi', u'mount': u'/hadoop/data06'}) => {\"changed\": true, \"fstab\": \"/etc/fstab\", \"fstype\": \"ext4\", \"item\": {\"device\": \"/dev/sdi\", \"mount\": \"/hadoop/data06\"}, \"name\": \"/hadoop/data06\", \"src\": \"/dev/sdi1\"}\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdj', u'mount': u'/hadoop/data07'}) => {\"changed\": true, \"fstab\": \"/etc/fstab\", \"fstype\": \"ext4\", \"item\": {\"device\": \"/dev/sdj\", \"mount\": \"/hadoop/data07\"}, \"name\": \"/hadoop/data07\", \"src\": \"/dev/sdj1\"}\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdk', u'mount': u'/hadoop/data08'}) => {\"changed\": false, \"fstab\": \"/etc/fstab\", \"fstype\": \"ext4\", \"item\": {\"device\": \"/dev/sdk\", \"mount\": \"/hadoop/data08\"}, \"name\": \"/hadoop/data08\", \"src\": \"/dev/sdk1\"}\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdl', u'mount': u'/hadoop/data09'}) => {\"changed\": true, \"fstab\": \"/etc/fstab\", \"fstype\": \"ext4\", \"item\": {\"device\": \"/dev/sdl\", \"mount\": \"/hadoop/data09\"}, \"name\": \"/hadoop/data09\", \"src\": \"/dev/sdl1\"}\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdm', u'mount': u'/hadoop/data10'}) => {\"changed\": true, \"fstab\": \"/etc/fstab\", \"fstype\": \"ext4\", \"item\": {\"device\": \"/dev/sdm\", \"mount\": \"/hadoop/data10\"}, \"name\": \"/hadoop/data10\", \"src\": \"/dev/sdm1\"}\u001b[0m\n", "\n", "TASK [zap GPT and MBR 
data structure of HDD.] **********************************\n", "\u001b[0;35m[DEPRECATION WARNING]: Using bare variables is deprecated. Update your playbooks\n", " so that the environment value uses the full variable syntax \n", "('{{hdd_devices_and_mountpoints}}').\n", "This feature will be removed in a future \n", "release. Deprecation warnings can be disabled by setting \n", "deprecation_warnings=False in ansible.cfg.\u001b[0m\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdc', u'mount': u'/hadoop/tmp'}) => {\"changed\": false, \"item\": {\"device\": \"/dev/sdc\", \"mount\": \"/hadoop/tmp\"}, \"msg\": \"remote module does not support check mode\", \"skipped\": true}\u001b[0m\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdd', u'mount': u'/hadoop/data01'}) => {\"changed\": false, \"item\": {\"device\": \"/dev/sdd\", \"mount\": \"/hadoop/data01\"}, \"msg\": \"remote module does not support check mode\", \"skipped\": true}\u001b[0m\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sde', u'mount': u'/hadoop/data02'}) => {\"changed\": false, \"item\": {\"device\": \"/dev/sde\", \"mount\": \"/hadoop/data02\"}, \"msg\": \"remote module does not support check mode\", \"skipped\": true}\u001b[0m\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdf', u'mount': u'/hadoop/data03'}) => {\"changed\": false, \"item\": {\"device\": \"/dev/sdf\", \"mount\": \"/hadoop/data03\"}, \"msg\": \"remote module does not support check mode\", \"skipped\": true}\u001b[0m\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdg', u'mount': u'/hadoop/data04'}) => {\"changed\": false, \"item\": {\"device\": \"/dev/sdg\", \"mount\": \"/hadoop/data04\"}, \"msg\": \"remote module does not support check mode\", \"skipped\": true}\u001b[0m\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdh', u'mount': u'/hadoop/data05'}) => {\"changed\": false, \"item\": {\"device\": \"/dev/sdh\", \"mount\": \"/hadoop/data05\"}, \"msg\": \"remote module does not support check mode\", \"skipped\": true}\u001b[0m\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdi', u'mount': u'/hadoop/data06'}) => {\"changed\": false, \"item\": {\"device\": \"/dev/sdi\", \"mount\": \"/hadoop/data06\"}, \"msg\": \"remote module does not support check mode\", \"skipped\": true}\u001b[0m\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdj', u'mount': u'/hadoop/data07'}) => {\"changed\": false, \"item\": {\"device\": \"/dev/sdj\", \"mount\": \"/hadoop/data07\"}, \"msg\": \"remote module does not support check mode\", \"skipped\": true}\u001b[0m\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdk', u'mount': u'/hadoop/data08'}) => {\"changed\": false, \"item\": {\"device\": \"/dev/sdk\", \"mount\": \"/hadoop/data08\"}, \"msg\": \"remote module does not support check mode\", \"skipped\": true}\u001b[0m\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdl', u'mount': u'/hadoop/data09'}) => {\"changed\": false, \"item\": {\"device\": \"/dev/sdl\", \"mount\": \"/hadoop/data09\"}, \"msg\": \"remote module does not support check mode\", \"skipped\": true}\u001b[0m\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdm', u'mount': u'/hadoop/data10'}) => {\"changed\": false, \"item\": {\"device\": \"/dev/sdm\", \"mount\": \"/hadoop/data10\"}, \"msg\": \"remote module does not support check mode\", 
\"skipped\": true}\u001b[0m\n", "\n", "TASK [create partition.] *******************************************************\n", "\u001b[0;35m[DEPRECATION WARNING]: Using bare variables is deprecated. Update your playbooks\n", " so that the environment value uses the full variable syntax \n", "('{{hdd_devices_and_mountpoints}}').\n", "This feature will be removed in a future \n", "release. Deprecation warnings can be disabled by setting \n", "deprecation_warnings=False in ansible.cfg.\u001b[0m\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdc', u'mount': u'/hadoop/tmp'}) => {\"changed\": false, \"item\": {\"device\": \"/dev/sdc\", \"mount\": \"/hadoop/tmp\"}, \"msg\": \"remote module does not support check mode\", \"skipped\": true}\u001b[0m\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdd', u'mount': u'/hadoop/data01'}) => {\"changed\": false, \"item\": {\"device\": \"/dev/sdd\", \"mount\": \"/hadoop/data01\"}, \"msg\": \"remote module does not support check mode\", \"skipped\": true}\u001b[0m\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sde', u'mount': u'/hadoop/data02'}) => {\"changed\": false, \"item\": {\"device\": \"/dev/sde\", \"mount\": \"/hadoop/data02\"}, \"msg\": \"remote module does not support check mode\", \"skipped\": true}\u001b[0m\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdf', u'mount': u'/hadoop/data03'}) => {\"changed\": false, \"item\": {\"device\": \"/dev/sdf\", \"mount\": \"/hadoop/data03\"}, \"msg\": \"remote module does not support check mode\", \"skipped\": true}\u001b[0m\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdg', u'mount': u'/hadoop/data04'}) => {\"changed\": false, \"item\": {\"device\": \"/dev/sdg\", \"mount\": \"/hadoop/data04\"}, \"msg\": \"remote module does not support check mode\", \"skipped\": true}\u001b[0m\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdh', u'mount': u'/hadoop/data05'}) => {\"changed\": false, \"item\": {\"device\": \"/dev/sdh\", \"mount\": \"/hadoop/data05\"}, \"msg\": \"remote module does not support check mode\", \"skipped\": true}\u001b[0m\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdi', u'mount': u'/hadoop/data06'}) => {\"changed\": false, \"item\": {\"device\": \"/dev/sdi\", \"mount\": \"/hadoop/data06\"}, \"msg\": \"remote module does not support check mode\", \"skipped\": true}\u001b[0m\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdj', u'mount': u'/hadoop/data07'}) => {\"changed\": false, \"item\": {\"device\": \"/dev/sdj\", \"mount\": \"/hadoop/data07\"}, \"msg\": \"remote module does not support check mode\", \"skipped\": true}\u001b[0m\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdk', u'mount': u'/hadoop/data08'}) => {\"changed\": false, \"item\": {\"device\": \"/dev/sdk\", \"mount\": \"/hadoop/data08\"}, \"msg\": \"remote module does not support check mode\", \"skipped\": true}\u001b[0m\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdl', u'mount': u'/hadoop/data09'}) => {\"changed\": false, \"item\": {\"device\": \"/dev/sdl\", \"mount\": \"/hadoop/data09\"}, \"msg\": \"remote module does not support check mode\", \"skipped\": true}\u001b[0m\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdm', u'mount': u'/hadoop/data10'}) => {\"changed\": false, \"item\": {\"device\": \"/dev/sdm\", \"mount\": \"/hadoop/data10\"}, 
\"msg\": \"remote module does not support check mode\", \"skipped\": true}\u001b[0m\n", "\n", "PLAY RECAP *********************************************************************\n", "\u001b[0;33mXXX.XXX.XXX.233\u001b[0m : \u001b[0;32mok\u001b[0m\u001b[0;32m=\u001b[0m\u001b[0;32m3\u001b[0m \u001b[0;33mchanged\u001b[0m\u001b[0;33m=\u001b[0m\u001b[0;33m1\u001b[0m unreachable=0 failed=0 \n", "\n" ] } ], "source": [ "!ansible-playbook -CDv -i {prereq_path}/current/hosts {prereq_path}/current/clean-hdds.yml" ] }, { "cell_type": "code", "execution_count": 40, "metadata": { "ExecuteTime": { "end_time": "2016-05-17T06:56:11.901094", "start_time": "2016-05-17T06:56:05.915523" }, "lc_cell_meme": { "current": "a8eb0740-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eb0858-495a-11e8-ba2b-0242ac130002", "previous": "a8eb0628-495a-11e8-ba2b-0242ac130002" }, "scrolled": true }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\u001b[0;35m[DEPRECATION WARNING]: Instead of sudo/sudo_user, use become/become_user and \n", "make sure become_method is 'sudo' (default).\n", "This feature will be removed in a \n", "future release. Deprecation warnings can be disabled by setting \n", "deprecation_warnings=False in ansible.cfg.\u001b[0m\n", "\n", "PLAY [all] *********************************************************************\n", "\n", "TASK [setup] *******************************************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [install gdisk package.] **************************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [unmount disks to be formatted.] ******************************************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [zap GPT and MBR data structure of HDD.] **********************************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [create partition.] *******************************************************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [unmount disks to be formatted.] ******************************************\n", "\u001b[0;35m[DEPRECATION WARNING]: Using bare variables is deprecated. Update your playbooks\n", " so that the environment value uses the full variable syntax \n", "('{{hdd_devices_and_mountpoints}}').\n", "This feature will be removed in a future \n", "release. 
Deprecation warnings can be disabled by setting \n", "deprecation_warnings=False in ansible.cfg.\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdc', u'mount': u'/hadoop/tmp'})\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdd', u'mount': u'/hadoop/data01'})\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sde', u'mount': u'/hadoop/data02'})\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdf', u'mount': u'/hadoop/data03'})\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdg', u'mount': u'/hadoop/data04'})\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdh', u'mount': u'/hadoop/data05'})\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdi', u'mount': u'/hadoop/data06'})\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdj', u'mount': u'/hadoop/data07'})\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdk', u'mount': u'/hadoop/data08'})\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdl', u'mount': u'/hadoop/data09'})\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdm', u'mount': u'/hadoop/data10'})\u001b[0m\n", "\n", "TASK [zap GPT and MBR data structure of HDD.] **********************************\n", "\u001b[0;35m[DEPRECATION WARNING]: Using bare variables is deprecated. Update your playbooks\n", " so that the environment value uses the full variable syntax \n", "('{{hdd_devices_and_mountpoints}}').\n", "This feature will be removed in a future \n", "release. Deprecation warnings can be disabled by setting \n", "deprecation_warnings=False in ansible.cfg.\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdc', u'mount': u'/hadoop/tmp'})\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdd', u'mount': u'/hadoop/data01'})\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sde', u'mount': u'/hadoop/data02'})\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdf', u'mount': u'/hadoop/data03'})\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdg', u'mount': u'/hadoop/data04'})\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdh', u'mount': u'/hadoop/data05'})\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdi', u'mount': u'/hadoop/data06'})\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdj', u'mount': u'/hadoop/data07'})\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdk', u'mount': u'/hadoop/data08'})\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdl', u'mount': u'/hadoop/data09'})\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdm', u'mount': u'/hadoop/data10'})\u001b[0m\n", "\n", "TASK [create partition.] *******************************************************\n", "\u001b[0;35m[DEPRECATION WARNING]: Using bare variables is deprecated. Update your playbooks\n", " so that the environment value uses the full variable syntax \n", "('{{hdd_devices_and_mountpoints}}').\n", "This feature will be removed in a future \n", "release. 
Deprecation warnings can be disabled by setting \n", "deprecation_warnings=False in ansible.cfg.\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdc', u'mount': u'/hadoop/tmp'})\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdd', u'mount': u'/hadoop/data01'})\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sde', u'mount': u'/hadoop/data02'})\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdf', u'mount': u'/hadoop/data03'})\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdg', u'mount': u'/hadoop/data04'})\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdh', u'mount': u'/hadoop/data05'})\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdi', u'mount': u'/hadoop/data06'})\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdj', u'mount': u'/hadoop/data07'})\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdk', u'mount': u'/hadoop/data08'})\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdl', u'mount': u'/hadoop/data09'})\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdm', u'mount': u'/hadoop/data10'})\u001b[0m\n", "\n", "PLAY RECAP *********************************************************************\n", "\u001b[0;33mXXX.XXX.XXX.233\u001b[0m : \u001b[0;32mok\u001b[0m\u001b[0;32m=\u001b[0m\u001b[0;32m5\u001b[0m \u001b[0;33mchanged\u001b[0m\u001b[0;33m=\u001b[0m\u001b[0;33m3\u001b[0m unreachable=0 failed=0 \n", "\n" ] } ], "source": [ "!ansible-playbook -i {prereq_path}/current/hosts {prereq_path}/current/clean-hdds.yml" ] }, { "cell_type": "markdown", "metadata": { "lc_cell_meme": { "current": "a8eb0858-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eb0984-495a-11e8-ba2b-0242ac130002", "previous": "a8eb0740-495a-11e8-ba2b-0242ac130002" } }, "source": [ "## Mount the disk" ] }, { "cell_type": "code", "execution_count": 41, "metadata": { "ExecuteTime": { "end_time": "2016-05-17T06:56:58.390923", "start_time": "2016-05-17T06:56:58.199022" }, "collapsed": true, "lc_cell_meme": { "current": "a8eb0984-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eb0a9c-495a-11e8-ba2b-0242ac130002", "previous": "a8eb0858-495a-11e8-ba2b-0242ac130002" } }, "outputs": [], "source": [ "!cp -fr {prereq_path}/original/roles {prereq_path}/current/roles" ] }, { "cell_type": "code", "execution_count": 42, "metadata": { "ExecuteTime": { "end_time": "2016-05-17T06:56:59.767672", "start_time": "2016-05-17T06:56:59.758907" }, "lc_cell_meme": { "current": "a8eb0a9c-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eb0bb4-495a-11e8-ba2b-0242ac130002", "previous": "a8eb0984-495a-11e8-ba2b-0242ac130002" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Writing /tmp/tmpwDTh_U/current/mount.yml\n" ] } ], "source": [ "%%writefile {prereq_path}/current/mount.yml\n", "- hosts: all\n", " become: yes\n", " roles:\n", " - mount" ] }, { "cell_type": "code", "execution_count": 43, "metadata": { "ExecuteTime": { "end_time": "2016-05-17T06:57:05.923069", "start_time": "2016-05-17T06:57:01.454150" }, "lc_cell_meme": { "current": "a8eb0bb4-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eb0cd6-495a-11e8-ba2b-0242ac130002", "previous": "a8eb0a9c-495a-11e8-ba2b-0242ac130002" }, "scrolled": true }, "outputs": [ { "name": "stdout", 
"output_type": "stream", "text": [ "Using /etc/ansible/ansible.cfg as config file\n", "\n", "PLAY [all] *********************************************************************\n", "\n", "TASK [setup] *******************************************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [mount : mkfs for /var/log] ***********************************************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233] => {\"changed\": false, \"skip_reason\": \"Conditional check failed\", \"skipped\": true}\u001b[0m\n", "\n", "TASK [mount : create a temporary directory for previous /var/log] **************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233] => {\"changed\": false, \"skip_reason\": \"Conditional check failed\", \"skipped\": true}\u001b[0m\n", "\n", "TASK [mount : copy files in /var/log to /tmp/old-log to keep logs] *************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233] => {\"changed\": false, \"skip_reason\": \"Conditional check failed\", \"skipped\": true}\u001b[0m\n", "\n", "TASK [mount : mount /var/log] **************************************************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233] => {\"changed\": false, \"skip_reason\": \"Conditional check failed\", \"skipped\": true}\u001b[0m\n", "\n", "TASK [mount : make filesystem on devices] **************************************\n", "\u001b[0;35m[DEPRECATION WARNING]: Using bare variables is deprecated. Update your playbooks\n", " so that the environment value uses the full variable syntax \n", "('{{hdd_devices_and_mountpoints}}').\n", "This feature will be removed in a future \n", "release. Deprecation warnings can be disabled by setting \n", "deprecation_warnings=False in ansible.cfg.\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdc', u'mount': u'/hadoop/tmp'}) => {\"changed\": false, \"item\": {\"device\": \"/dev/sdc\", \"mount\": \"/hadoop/tmp\"}}\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdd', u'mount': u'/hadoop/data01'}) => {\"changed\": false, \"item\": {\"device\": \"/dev/sdd\", \"mount\": \"/hadoop/data01\"}}\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sde', u'mount': u'/hadoop/data02'}) => {\"changed\": false, \"item\": {\"device\": \"/dev/sde\", \"mount\": \"/hadoop/data02\"}}\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdf', u'mount': u'/hadoop/data03'}) => {\"changed\": false, \"item\": {\"device\": \"/dev/sdf\", \"mount\": \"/hadoop/data03\"}}\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdg', u'mount': u'/hadoop/data04'}) => {\"changed\": false, \"item\": {\"device\": \"/dev/sdg\", \"mount\": \"/hadoop/data04\"}}\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdh', u'mount': u'/hadoop/data05'}) => {\"changed\": true, \"item\": {\"device\": \"/dev/sdh\", \"mount\": \"/hadoop/data05\"}}\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdi', u'mount': u'/hadoop/data06'}) => {\"changed\": false, \"item\": {\"device\": \"/dev/sdi\", \"mount\": \"/hadoop/data06\"}}\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdj', u'mount': u'/hadoop/data07'}) => {\"changed\": false, \"item\": {\"device\": \"/dev/sdj\", \"mount\": \"/hadoop/data07\"}}\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdk', u'mount': u'/hadoop/data08'}) => {\"changed\": false, \"item\": {\"device\": \"/dev/sdk\", \"mount\": 
\"/hadoop/data08\"}}\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdl', u'mount': u'/hadoop/data09'}) => {\"changed\": false, \"item\": {\"device\": \"/dev/sdl\", \"mount\": \"/hadoop/data09\"}}\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdm', u'mount': u'/hadoop/data10'}) => {\"changed\": false, \"item\": {\"device\": \"/dev/sdm\", \"mount\": \"/hadoop/data10\"}}\u001b[0m\n", "\n", "TASK [mount : ensure /hadoop/dataX directory for mount point is present on SN.] \n", "\u001b[0;35m[DEPRECATION WARNING]: Using bare variables is deprecated. Update your playbooks\n", " so that the environment value uses the full variable syntax \n", "('{{hdd_devices_and_mountpoints}}').\n", "This feature will be removed in a future \n", "release. Deprecation warnings can be disabled by setting \n", "deprecation_warnings=False in ansible.cfg.\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdc', u'mount': u'/hadoop/tmp'}) => {\"changed\": false, \"gid\": 0, \"group\": \"root\", \"item\": {\"device\": \"/dev/sdc\", \"mount\": \"/hadoop/tmp\"}, \"mode\": \"0755\", \"owner\": \"root\", \"path\": \"/hadoop/tmp\", \"size\": 4096, \"state\": \"directory\", \"uid\": 0}\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdd', u'mount': u'/hadoop/data01'}) => {\"changed\": false, \"gid\": 0, \"group\": \"root\", \"item\": {\"device\": \"/dev/sdd\", \"mount\": \"/hadoop/data01\"}, \"mode\": \"0755\", \"owner\": \"root\", \"path\": \"/hadoop/data01\", \"size\": 4096, \"state\": \"directory\", \"uid\": 0}\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sde', u'mount': u'/hadoop/data02'}) => {\"changed\": false, \"gid\": 0, \"group\": \"root\", \"item\": {\"device\": \"/dev/sde\", \"mount\": \"/hadoop/data02\"}, \"mode\": \"0755\", \"owner\": \"root\", \"path\": \"/hadoop/data02\", \"size\": 4096, \"state\": \"directory\", \"uid\": 0}\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdf', u'mount': u'/hadoop/data03'}) => {\"changed\": false, \"gid\": 0, \"group\": \"root\", \"item\": {\"device\": \"/dev/sdf\", \"mount\": \"/hadoop/data03\"}, \"mode\": \"0755\", \"owner\": \"root\", \"path\": \"/hadoop/data03\", \"size\": 4096, \"state\": \"directory\", \"uid\": 0}\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdg', u'mount': u'/hadoop/data04'}) => {\"changed\": false, \"gid\": 0, \"group\": \"root\", \"item\": {\"device\": \"/dev/sdg\", \"mount\": \"/hadoop/data04\"}, \"mode\": \"0755\", \"owner\": \"root\", \"path\": \"/hadoop/data04\", \"size\": 4096, \"state\": \"directory\", \"uid\": 0}\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdh', u'mount': u'/hadoop/data05'}) => {\"changed\": false, \"gid\": 0, \"group\": \"root\", \"item\": {\"device\": \"/dev/sdh\", \"mount\": \"/hadoop/data05\"}, \"mode\": \"0755\", \"owner\": \"root\", \"path\": \"/hadoop/data05\", \"size\": 4096, \"state\": \"directory\", \"uid\": 0}\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdi', u'mount': u'/hadoop/data06'}) => {\"changed\": false, \"gid\": 0, \"group\": \"root\", \"item\": {\"device\": \"/dev/sdi\", \"mount\": \"/hadoop/data06\"}, \"mode\": \"0755\", \"owner\": \"root\", \"path\": \"/hadoop/data06\", \"size\": 4096, \"state\": \"directory\", \"uid\": 0}\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdj', u'mount': u'/hadoop/data07'}) => 
{\"changed\": false, \"gid\": 0, \"group\": \"root\", \"item\": {\"device\": \"/dev/sdj\", \"mount\": \"/hadoop/data07\"}, \"mode\": \"0755\", \"owner\": \"root\", \"path\": \"/hadoop/data07\", \"size\": 4096, \"state\": \"directory\", \"uid\": 0}\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdk', u'mount': u'/hadoop/data08'}) => {\"changed\": false, \"gid\": 0, \"group\": \"root\", \"item\": {\"device\": \"/dev/sdk\", \"mount\": \"/hadoop/data08\"}, \"mode\": \"0755\", \"owner\": \"root\", \"path\": \"/hadoop/data08\", \"size\": 4096, \"state\": \"directory\", \"uid\": 0}\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdl', u'mount': u'/hadoop/data09'}) => {\"changed\": false, \"gid\": 0, \"group\": \"root\", \"item\": {\"device\": \"/dev/sdl\", \"mount\": \"/hadoop/data09\"}, \"mode\": \"0755\", \"owner\": \"root\", \"path\": \"/hadoop/data09\", \"size\": 4096, \"state\": \"directory\", \"uid\": 0}\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdm', u'mount': u'/hadoop/data10'}) => {\"changed\": false, \"gid\": 0, \"group\": \"root\", \"item\": {\"device\": \"/dev/sdm\", \"mount\": \"/hadoop/data10\"}, \"mode\": \"0755\", \"owner\": \"root\", \"path\": \"/hadoop/data10\", \"size\": 4096, \"state\": \"directory\", \"uid\": 0}\u001b[0m\n", "\n", "TASK [mount : add mount information in fstab on SN.] ***************************\n", "\u001b[0;35m[DEPRECATION WARNING]: Using bare variables is deprecated. Update your playbooks\n", " so that the environment value uses the full variable syntax \n", "('{{hdd_devices_and_mountpoints}}').\n", "This feature will be removed in a future \n", "release. Deprecation warnings can be disabled by setting \n", "deprecation_warnings=False in ansible.cfg.\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdc', u'mount': u'/hadoop/tmp'}) => {\"changed\": true, \"fstab\": \"/etc/fstab\", \"fstype\": \"ext4\", \"item\": {\"device\": \"/dev/sdc\", \"mount\": \"/hadoop/tmp\"}, \"name\": \"/hadoop/tmp\", \"src\": \"/dev/sdc1\"}\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdd', u'mount': u'/hadoop/data01'}) => {\"changed\": true, \"fstab\": \"/etc/fstab\", \"fstype\": \"ext4\", \"item\": {\"device\": \"/dev/sdd\", \"mount\": \"/hadoop/data01\"}, \"name\": \"/hadoop/data01\", \"src\": \"/dev/sdd1\"}\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sde', u'mount': u'/hadoop/data02'}) => {\"changed\": true, \"fstab\": \"/etc/fstab\", \"fstype\": \"ext4\", \"item\": {\"device\": \"/dev/sde\", \"mount\": \"/hadoop/data02\"}, \"name\": \"/hadoop/data02\", \"src\": \"/dev/sde1\"}\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdf', u'mount': u'/hadoop/data03'}) => {\"changed\": true, \"fstab\": \"/etc/fstab\", \"fstype\": \"ext4\", \"item\": {\"device\": \"/dev/sdf\", \"mount\": \"/hadoop/data03\"}, \"name\": \"/hadoop/data03\", \"src\": \"/dev/sdf1\"}\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdg', u'mount': u'/hadoop/data04'}) => {\"changed\": true, \"fstab\": \"/etc/fstab\", \"fstype\": \"ext4\", \"item\": {\"device\": \"/dev/sdg\", \"mount\": \"/hadoop/data04\"}, \"name\": \"/hadoop/data04\", \"src\": \"/dev/sdg1\"}\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdh', u'mount': u'/hadoop/data05'}) => {\"changed\": true, \"fstab\": \"/etc/fstab\", \"fstype\": \"ext4\", \"item\": 
{\"device\": \"/dev/sdh\", \"mount\": \"/hadoop/data05\"}, \"name\": \"/hadoop/data05\", \"src\": \"/dev/sdh1\"}\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdi', u'mount': u'/hadoop/data06'}) => {\"changed\": true, \"fstab\": \"/etc/fstab\", \"fstype\": \"ext4\", \"item\": {\"device\": \"/dev/sdi\", \"mount\": \"/hadoop/data06\"}, \"name\": \"/hadoop/data06\", \"src\": \"/dev/sdi1\"}\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdj', u'mount': u'/hadoop/data07'}) => {\"changed\": true, \"fstab\": \"/etc/fstab\", \"fstype\": \"ext4\", \"item\": {\"device\": \"/dev/sdj\", \"mount\": \"/hadoop/data07\"}, \"name\": \"/hadoop/data07\", \"src\": \"/dev/sdj1\"}\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdk', u'mount': u'/hadoop/data08'}) => {\"changed\": true, \"fstab\": \"/etc/fstab\", \"fstype\": \"ext4\", \"item\": {\"device\": \"/dev/sdk\", \"mount\": \"/hadoop/data08\"}, \"name\": \"/hadoop/data08\", \"src\": \"/dev/sdk1\"}\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdl', u'mount': u'/hadoop/data09'}) => {\"changed\": true, \"fstab\": \"/etc/fstab\", \"fstype\": \"ext4\", \"item\": {\"device\": \"/dev/sdl\", \"mount\": \"/hadoop/data09\"}, \"name\": \"/hadoop/data09\", \"src\": \"/dev/sdl1\"}\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdm', u'mount': u'/hadoop/data10'}) => {\"changed\": true, \"fstab\": \"/etc/fstab\", \"fstype\": \"ext4\", \"item\": {\"device\": \"/dev/sdm\", \"mount\": \"/hadoop/data10\"}, \"name\": \"/hadoop/data10\", \"src\": \"/dev/sdm1\"}\u001b[0m\n", "\n", "TASK [mount : ensure /hadoop/dataX directory for mount point is present on CN.] \n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233] => (item=/hadoop/data) => {\"changed\": false, \"item\": \"/hadoop/data\", \"skip_reason\": \"Conditional check failed\", \"skipped\": true}\u001b[0m\n", "\n", "PLAY RECAP *********************************************************************\n", "\u001b[0;33mXXX.XXX.XXX.233\u001b[0m : \u001b[0;32mok\u001b[0m\u001b[0;32m=\u001b[0m\u001b[0;32m4\u001b[0m \u001b[0;33mchanged\u001b[0m\u001b[0;33m=\u001b[0m\u001b[0;33m2\u001b[0m unreachable=0 failed=0 \n", "\n" ] } ], "source": [ "!ansible-playbook -CDv -i {prereq_path}/current/hosts {prereq_path}/current/mount.yml" ] }, { "cell_type": "code", "execution_count": 44, "metadata": { "ExecuteTime": { "end_time": "2016-05-17T07:04:28.256966", "start_time": "2016-05-17T06:57:10.422517" }, "lc_cell_meme": { "current": "a8eb0cd6-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eb0df8-495a-11e8-ba2b-0242ac130002", "previous": "a8eb0bb4-495a-11e8-ba2b-0242ac130002" }, "scrolled": true }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\n", "PLAY [all] *********************************************************************\n", "\n", "TASK [setup] *******************************************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [mount : mkfs for /var/log] ***********************************************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [mount : create a temporary directory for previous /var/log] **************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [mount : copy files in /var/log to /tmp/old-log to keep logs] *************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [mount : mount /var/log] 
**************************************************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [mount : make filesystem on devices] **************************************\n", "\u001b[0;35m[DEPRECATION WARNING]: Using bare variables is deprecated. Update your playbooks\n", " so that the environment value uses the full variable syntax \n", "('{{hdd_devices_and_mountpoints}}').\n", "This feature will be removed in a future \n", "release. Deprecation warnings can be disabled by setting \n", "deprecation_warnings=False in ansible.cfg.\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdc', u'mount': u'/hadoop/tmp'})\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdd', u'mount': u'/hadoop/data01'})\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sde', u'mount': u'/hadoop/data02'})\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdf', u'mount': u'/hadoop/data03'})\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdg', u'mount': u'/hadoop/data04'})\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdh', u'mount': u'/hadoop/data05'})\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdi', u'mount': u'/hadoop/data06'})\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdj', u'mount': u'/hadoop/data07'})\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdk', u'mount': u'/hadoop/data08'})\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdl', u'mount': u'/hadoop/data09'})\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdm', u'mount': u'/hadoop/data10'})\u001b[0m\n", "\n", "TASK [mount : ensure /hadoop/dataX directory for mount point is present on SN.] \n", "\u001b[0;35m[DEPRECATION WARNING]: Using bare variables is deprecated. Update your playbooks\n", " so that the environment value uses the full variable syntax \n", "('{{hdd_devices_and_mountpoints}}').\n", "This feature will be removed in a future \n", "release. 
Deprecation warnings can be disabled by setting \n", "deprecation_warnings=False in ansible.cfg.\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdc', u'mount': u'/hadoop/tmp'})\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdd', u'mount': u'/hadoop/data01'})\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sde', u'mount': u'/hadoop/data02'})\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdf', u'mount': u'/hadoop/data03'})\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdg', u'mount': u'/hadoop/data04'})\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdh', u'mount': u'/hadoop/data05'})\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdi', u'mount': u'/hadoop/data06'})\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdj', u'mount': u'/hadoop/data07'})\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdk', u'mount': u'/hadoop/data08'})\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdl', u'mount': u'/hadoop/data09'})\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdm', u'mount': u'/hadoop/data10'})\u001b[0m\n", "\n", "TASK [mount : add mount information in fstab on SN.] ***************************\n", "\u001b[0;35m[DEPRECATION WARNING]: Using bare variables is deprecated. Update your playbooks\n", " so that the environment value uses the full variable syntax \n", "('{{hdd_devices_and_mountpoints}}').\n", "This feature will be removed in a future \n", "release. Deprecation warnings can be disabled by setting \n", "deprecation_warnings=False in ansible.cfg.\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdc', u'mount': u'/hadoop/tmp'})\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdd', u'mount': u'/hadoop/data01'})\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sde', u'mount': u'/hadoop/data02'})\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdf', u'mount': u'/hadoop/data03'})\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdg', u'mount': u'/hadoop/data04'})\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdh', u'mount': u'/hadoop/data05'})\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdi', u'mount': u'/hadoop/data06'})\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdj', u'mount': u'/hadoop/data07'})\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdk', u'mount': u'/hadoop/data08'})\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdl', u'mount': u'/hadoop/data09'})\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'device': u'/dev/sdm', u'mount': u'/hadoop/data10'})\u001b[0m\n", "\n", "TASK [mount : ensure /hadoop/dataX directory for mount point is present on CN.] 
\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233] => (item=/hadoop/data) \u001b[0m\n", "\n", "PLAY RECAP *********************************************************************\n", "\u001b[0;33mXXX.XXX.XXX.233\u001b[0m : \u001b[0;32mok\u001b[0m\u001b[0;32m=\u001b[0m\u001b[0;32m4\u001b[0m \u001b[0;33mchanged\u001b[0m\u001b[0;33m=\u001b[0m\u001b[0;33m2\u001b[0m unreachable=0 failed=0 \n", "\n" ] } ], "source": [ "!ansible-playbook -i {prereq_path}/current/hosts {prereq_path}/current/mount.yml" ] }, { "cell_type": "markdown", "metadata": { "lc_cell_meme": { "current": "a8eb0df8-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eb0f10-495a-11e8-ba2b-0242ac130002", "previous": "a8eb0cd6-495a-11e8-ba2b-0242ac130002" } }, "source": [ "Review the mountpoint..." ] }, { "cell_type": "code", "execution_count": 45, "metadata": { "ExecuteTime": { "end_time": "2016-05-17T07:04:29.549729", "start_time": "2016-05-17T07:04:28.261823" }, "lc_cell_meme": { "current": "a8eb0f10-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eb1028-495a-11e8-ba2b-0242ac130002", "previous": "a8eb0df8-495a-11e8-ba2b-0242ac130002" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\u001b[0;32mXXX.XXX.XXX.233 | SUCCESS | rc=0 >>\r\n", "Filesystem Size Used Avail Use% Mounted on\r\n", "/dev/sda2 14G 4.0G 9.3G 30% /\r\n", "tmpfs 34G 0 34G 0% /dev/shm\r\n", "/dev/sda5 726M 180M 509M 27% /mnt\r\n", "/dev/sdb1 3.0T 7.1G 2.8T 1% /var/log\r\n", "/dev/sdc1 3.0T 7.9G 2.8T 1% /hadoop/tmp\r\n", "/dev/sdd1 3.0T 77M 2.9T 1% /hadoop/data01\r\n", "/dev/sde1 3.0T 77M 2.9T 1% /hadoop/data02\r\n", "/dev/sdf1 3.0T 22G 2.8T 1% /hadoop/data03\r\n", "/dev/sdg1 3.0T 77M 2.9T 1% /hadoop/data04\r\n", "/dev/sdh1 4.0T 72M 3.8T 1% /hadoop/data05\r\n", "/dev/sdi1 3.0T 77M 2.9T 1% /hadoop/data06\r\n", "/dev/sdj1 3.0T 77M 2.9T 1% /hadoop/data07\r\n", "/dev/sdk1 3.0T 77M 2.9T 1% /hadoop/data08\r\n", "/dev/sdl1 3.0T 77M 2.9T 1% /hadoop/data09\r\n", "/dev/sdm1 3.0T 77M 2.9T 1% /hadoop/data10\r\n", "\u001b[0m\r\n" ] } ], "source": [ "!ansible -a 'df -H' -i {prereq_path}/current/hosts all" ] }, { "cell_type": "markdown", "metadata": { "lc_cell_meme": { "current": "a8eb1028-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eb1140-495a-11e8-ba2b-0242ac130002", "previous": "a8eb0f10-495a-11e8-ba2b-0242ac130002" } }, "source": [ "# Restore the DataNode\n", "\n", "Check the health of whole HDFS..." 
] }, { "cell_type": "code", "execution_count": 47, "metadata": { "ExecuteTime": { "end_time": "2016-05-17T07:04:49.993291", "start_time": "2016-05-17T07:04:45.932175" }, "lc_cell_meme": { "current": "a8eb1140-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eb1258-495a-11e8-ba2b-0242ac130002", "previous": "a8eb1028-495a-11e8-ba2b-0242ac130002" }, "scrolled": true }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\u001b[0;32mXXX.XXX.XXX.200 | SUCCESS | rc=0 >>\r\n", "Configured Capacity: 238194899124224 (216.64 TB)\r\n", "Present Capacity: 226085043405016 (205.62 TB)\r\n", "DFS Remaining: 222783090728298 (202.62 TB)\r\n", "DFS Used: 3301952676718 (3.00 TB)\r\n", "DFS Used%: 1.46%\r\n", "Under replicated blocks: 0\r\n", "Blocks with corrupt replicas: 0\r\n", "Missing blocks: 0\r\n", "Missing blocks (with replication factor 1): 746\r\n", "\r\n", "-------------------------------------------------\r\n", "Live datanodes (8):\r\n", "\r\n", "Name: XXX.XXX.XXX.226:1004 (sn02022001)\r\n", "Hostname: sn02022001\r\n", "Decommission Status : Normal\r\n", "Configured Capacity: 29528238325760 (26.86 TB)\r\n", "DFS Used: 518379108592 (482.78 GB)\r\n", "Non DFS Used: 1501183849997 (1.37 TB)\r\n", "DFS Remaining: 27508675367171 (25.02 TB)\r\n", "DFS Used%: 1.76%\r\n", "DFS Remaining%: 93.16%\r\n", "Configured Cache Capacity: 0 (0 B)\r\n", "Cache Used: 0 (0 B)\r\n", "Cache Remaining: 0 (0 B)\r\n", "Cache Used%: 100.00%\r\n", "Cache Remaining%: 0.00%\r\n", "Xceivers: 8\r\n", "Last contact: Fri Sep 02 19:19:31 JST 2016\r\n", "\r\n", "\r\n", "Name: XXX.XXX.XXX.232:1004 (sn02032001)\r\n", "Hostname: sn02032001\r\n", "Decommission Status : Normal\r\n", "Configured Capacity: 29528238325760 (26.86 TB)\r\n", "DFS Used: 342257353961 (318.75 GB)\r\n", "Non DFS Used: 1501322791679 (1.37 TB)\r\n", "DFS Remaining: 27684658180120 (25.18 TB)\r\n", "DFS Used%: 1.16%\r\n", "DFS Remaining%: 93.76%\r\n", "Configured Cache Capacity: 0 (0 B)\r\n", "Cache Used: 0 (0 B)\r\n", "Cache Remaining: 0 (0 B)\r\n", "Cache Used%: 100.00%\r\n", "Cache Remaining%: 0.00%\r\n", "Xceivers: 13\r\n", "Last contact: Fri Sep 02 19:19:31 JST 2016\r\n", "\r\n", "\r\n", "Name: XXX.XXX.XXX.234:1004 (sn02031201)\r\n", "Hostname: sn02031201\r\n", "Decommission Status : Normal\r\n", "Configured Capacity: 29528238325760 (26.86 TB)\r\n", "DFS Used: 545166445539 (507.73 GB)\r\n", "Non DFS Used: 1501182690661 (1.37 TB)\r\n", "DFS Remaining: 27481889189560 (24.99 TB)\r\n", "DFS Used%: 1.85%\r\n", "DFS Remaining%: 93.07%\r\n", "Configured Cache Capacity: 0 (0 B)\r\n", "Cache Used: 0 (0 B)\r\n", "Cache Remaining: 0 (0 B)\r\n", "Cache Used%: 100.00%\r\n", "Cache Remaining%: 0.00%\r\n", "Xceivers: 8\r\n", "Last contact: Fri Sep 02 19:19:30 JST 2016\r\n", "\r\n", "\r\n", "Name: XXX.XXX.XXX.228:1004 (sn02021201)\r\n", "Hostname: sn02021201\r\n", "Decommission Status : Normal\r\n", "Configured Capacity: 29528238325760 (26.86 TB)\r\n", "DFS Used: 511456102937 (476.33 GB)\r\n", "Non DFS Used: 1501047593282 (1.37 TB)\r\n", "DFS Remaining: 27515734629541 (25.03 TB)\r\n", "DFS Used%: 1.73%\r\n", "DFS Remaining%: 93.18%\r\n", "Configured Cache Capacity: 0 (0 B)\r\n", "Cache Used: 0 (0 B)\r\n", "Cache Remaining: 0 (0 B)\r\n", "Cache Used%: 100.00%\r\n", "Cache Remaining%: 0.00%\r\n", "Xceivers: 6\r\n", "Last contact: Fri Sep 02 19:19:30 JST 2016\r\n", "\r\n", "\r\n", "Name: XXX.XXX.XXX.231:1004 (sn02032401)\r\n", "Hostname: sn02032401\r\n", "Decommission Status : Normal\r\n", "Configured Capacity: 31497230843904 (28.65 TB)\r\n", "DFS Used: 
157318606848 (146.51 GB)\r\n", "Non DFS Used: 1601186369313 (1.46 TB)\r\n", "DFS Remaining: 29738725867743 (27.05 TB)\r\n", "DFS Used%: 0.50%\r\n", "DFS Remaining%: 94.42%\r\n", "Configured Cache Capacity: 0 (0 B)\r\n", "Cache Used: 0 (0 B)\r\n", "Cache Remaining: 0 (0 B)\r\n", "Cache Used%: 100.00%\r\n", "Cache Remaining%: 0.00%\r\n", "Xceivers: 8\r\n", "Last contact: Fri Sep 02 19:19:32 JST 2016\r\n", "\r\n", "\r\n", "Name: XXX.XXX.XXX.236:1004 (sn02030401)\r\n", "Hostname: sn02030401\r\n", "Decommission Status : Normal\r\n", "Configured Capacity: 29528238325760 (26.86 TB)\r\n", "DFS Used: 146703651081 (136.63 GB)\r\n", "Non DFS Used: 1501439077195 (1.37 TB)\r\n", "DFS Remaining: 27880095597484 (25.36 TB)\r\n", "DFS Used%: 0.50%\r\n", "DFS Remaining%: 94.42%\r\n", "Configured Cache Capacity: 0 (0 B)\r\n", "Cache Used: 0 (0 B)\r\n", "Cache Remaining: 0 (0 B)\r\n", "Cache Used%: 100.00%\r\n", "Cache Remaining%: 0.00%\r\n", "Xceivers: 13\r\n", "Last contact: Fri Sep 02 19:19:30 JST 2016\r\n", "\r\n", "\r\n", "Name: XXX.XXX.XXX.225:1004 (sn02022401)\r\n", "Hostname: sn02022401\r\n", "Decommission Status : Normal\r\n", "Configured Capacity: 29528238325760 (26.86 TB)\r\n", "DFS Used: 532531852056 (495.96 GB)\r\n", "Non DFS Used: 1501046517760 (1.37 TB)\r\n", "DFS Remaining: 27494659955944 (25.01 TB)\r\n", "DFS Used%: 1.80%\r\n", "DFS Remaining%: 93.11%\r\n", "Configured Cache Capacity: 0 (0 B)\r\n", "Cache Used: 0 (0 B)\r\n", "Cache Remaining: 0 (0 B)\r\n", "Cache Used%: 100.00%\r\n", "Cache Remaining%: 0.00%\r\n", "Xceivers: 6\r\n", "Last contact: Fri Sep 02 19:19:30 JST 2016\r\n", "\r\n", "\r\n", "Name: XXX.XXX.XXX.230:1004 (sn02020401)\r\n", "Hostname: sn02020401\r\n", "Decommission Status : Normal\r\n", "Configured Capacity: 29528238325760 (26.86 TB)\r\n", "DFS Used: 548139555704 (510.49 GB)\r\n", "Non DFS Used: 1501446829321 (1.37 TB)\r\n", "DFS Remaining: 27478651940735 (24.99 TB)\r\n", "DFS Used%: 1.86%\r\n", "DFS Remaining%: 93.06%\r\n", "Configured Cache Capacity: 0 (0 B)\r\n", "Cache Used: 0 (0 B)\r\n", "Cache Remaining: 0 (0 B)\r\n", "Cache Used%: 100.00%\r\n", "Cache Remaining%: 0.00%\r\n", "Xceivers: 14\r\n", "Last contact: Fri Sep 02 19:19:31 JST 2016\r\n", "\r\n", "\r\n", "Dead datanodes (1):\r\n", "\r\n", "Name: XXX.XXX.XXX.233:1004 (sn02031601)\r\n", "Hostname: sn02031601\r\n", "Decommission Status : Normal\r\n", "Configured Capacity: 0 (0 B)\r\n", "DFS Used: 0 (0 B)\r\n", "Non DFS Used: 0 (0 B)\r\n", "DFS Remaining: 0 (0 B)\r\n", "DFS Used%: 100.00%\r\n", "DFS Remaining%: 0.00%\r\n", "Configured Cache Capacity: 0 (0 B)\r\n", "Cache Used: 0 (0 B)\r\n", "Cache Remaining: 0 (0 B)\r\n", "Cache Used%: 100.00%\r\n", "Cache Remaining%: 0.00%\r\n", "Xceivers: 0\r\n", "Last contact: Thu Aug 25 04:05:38 JST 2016\r\n", "\u001b[0m\r\n" ] } ], "source": [ "!ansible hadoop_client -s -U hdfs -a 'hdfs dfsadmin -report' -l {target_group}" ] }, { "cell_type": "markdown", "metadata": { "collapsed": true, "lc_cell_meme": { "current": "a8eb1258-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eb1370-495a-11e8-ba2b-0242ac130002", "previous": "a8eb1140-495a-11e8-ba2b-0242ac130002" } }, "source": [ "## Setting up the DataNode\n", "\n", "Let's prepare the DataNode directories." ] }, { "cell_type": "markdown", "metadata": { "lc_cell_meme": { "current": "a8eb1370-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eb1492-495a-11e8-ba2b-0242ac130002", "previous": "a8eb1258-495a-11e8-ba2b-0242ac130002" } }, "source": [ "Ensure that the directory **/hadoop/dataXX/dfs/datadir** is present..." 
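, "\n\nA minimal manual spot check (a sketch, assuming bash on the restored node; the playbook run below enforces the same state idempotently):\n", "\n", "```sh\n", "# Each DataNode data directory should exist on its mount,\n", "# owned by hdfs:hadoop with mode 0700\n", "for d in /hadoop/data{01..10}; do\n", "  ls -ld \"$d/dfs/datadir\"\n", "done\n", "```"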
] }, { "cell_type": "code", "execution_count": 48, "metadata": { "lc_cell_meme": { "current": "a8eb1492-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eb15b4-495a-11e8-ba2b-0242ac130002", "previous": "a8eb1370-495a-11e8-ba2b-0242ac130002" } }, "outputs": [ { "data": { "text/plain": [ "'/tmp/tmpP0U4b9'" ] }, "execution_count": 48, "metadata": {}, "output_type": "execute_result" } ], "source": [ "import os\n", "import tempfile\n", "\n", "work_dir = tempfile.mkdtemp()\n", "work_dir" ] }, { "cell_type": "code", "execution_count": 50, "metadata": { "lc_cell_meme": { "current": "a8eb15b4-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eb16cc-495a-11e8-ba2b-0242ac130002", "previous": "a8eb1492-495a-11e8-ba2b-0242ac130002" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Cloning into '/tmp/tmpP0U4b9/hadoop'...\n", "remote: Counting objects: 849, done.\u001b[K\n", "remote: Total 849 (delta 0), reused 0 (delta 0), pack-reused 849\u001b[K\n", "Receiving objects: 100% (849/849), 169.06 KiB | 267.00 KiB/s, done.\n", "Resolving deltas: 100% (272/272), done.\n", "Checking connectivity... done.\n", "/tmp/tmpP0U4b9/hadoop\n", "└── playbooks\n", " ├── conf_base.retry\n", " ├── conf_base.yml\n", " ├── conf_hdfs_base.yml\n", " ├── conf_hdfs_spark.yml\n", " ├── conf_hdfs_tez.yml\n", " ├── conf_hdfs_yarn.yml\n", " ├── conf_namenode_bootstrapstandby.yml\n", " ├── conf_tez.yml\n", " ├── enter_hdfs_safemode.yml\n", " ├── format_namenode.yml\n", " ├── group_vars\n", " │   └── all\n", " │   ├── base\n", " │   ├── cgroups\n", " │   ├── collect\n", " │   ├── f500.dumpall\n", " │   ├── hbase_master\n", " │   ├── hbase_regionserver\n", " │   ├── hcatalog\n", " │   ├── hdfs_base\n", " │   ├── hdfs_spark\n", " │   ├── hdfs_tez\n", " │   ├── hdfs_yarn\n", " │   ├── hive\n", " │   ├── httpfs\n", " │   ├── hue\n", " │   ├── java7\n", " │   ├── java8\n", " │   ├── journalnode\n", " │   ├── mapreduce_history\n", " │   ├── namenode\n", " │   ├── namenode_bootstrapstandby\n", " │   ├── namenode_format\n", " │   ├── os\n", " │   ├── pig\n", " │   ├── presto_client\n", " │   ├── presto_coordinator\n", " │   ├── presto_user\n", " │   ├── presto_worker\n", " │   ├── resourcemanager\n", " │   ├── site-defaults\n", " │   ├── slavenode\n", " │   ├── spark\n", " │   ├── spark_history\n", " │   ├── spark_user\n", " │   ├── storm\n", " │   ├── tez\n", " │   └── zookeeper_server\n", " ├── install-base.yml\n", " ├── install_client.yml\n", " ├── install_hbase_master.yml\n", " ├── install_hbase_regionserver.yml\n", " ├── install_hcatalog.yml\n", " ├── install_hive.yml\n", " ├── install_httpfs.yml\n", " ├── install_hue.yml\n", " ├── install_journalnode.yml\n", " ├── install_mapreduce_history.yml\n", " ├── install_namenode.yml\n", " ├── install_pig.yml\n", " ├── install_resourcemanager.yml\n", " ├── install_slavenode.yml\n", " ├── install_spark_historyserver.yml\n", " ├── install_spark.yml\n", " ├── install_timelineservice.yml\n", " ├── install_zookeeper.yml\n", " ├── roles\n", " │   ├── base\n", " │   │   ├── defaults\n", " │   │   │   └── main.yml\n", " │   │   ├── meta\n", " │   │   │   └── main.yml\n", " │   │   ├── tasks\n", " │   │   │   ├── conf.yml\n", " │   │   │   ├── kerberos.yml\n", " │   │   │   ├── keytab.yml\n", " │   │   │   ├── main.yml\n", " │   │   │   ├── principal.yml\n", " │   │   │   └── repo.yml\n", " │   │   └── templates\n", " │   │   ├── capacity-scheduler.xml.j2\n", " │   │   ├── container-executor.cfg.j2\n", " │   │   ├── core-site.xml.j2\n", " │   │   ├── 
hadoop-env.sh.j2\n", " │   │   ├── hadoop-metrics2.properties.j2\n", " │   │   ├── hadoop-metrics.properties.j2\n", " │   │   ├── hdfs-site.xml.j2\n", " │   │   ├── hdp.repo.j2\n", " │   │   ├── hosts.exclude.j2\n", " │   │   ├── hosts.list.j2\n", " │   │   ├── log4j.properties.j2\n", " │   │   ├── mapred-env.sh.j2\n", " │   │   ├── mapred-site.xml.j2\n", " │   │   ├── merge-keytabs.ktutil.j2\n", " │   │   ├── ssl-client.xml.j2\n", " │   │   ├── ssl-server.xml.j2\n", " │   │   ├── yarn-env.sh.j2\n", " │   │   ├── yarn-site.xml.j2\n", " │   │   └── zk-acl.txt.j2\n", " │   ├── cgroups\n", " │   │   ├── defaults\n", " │   │   │   └── main.yml\n", " │   │   ├── meta\n", " │   │   │   └── main.yml\n", " │   │   ├── tasks\n", " │   │   │   ├── conf.yml\n", " │   │   │   ├── install.yml\n", " │   │   │   ├── main.yml\n", " │   │   │   └── resource.yml\n", " │   │   └── templates\n", " │   │   ├── cgconfig.conf.j2\n", " │   │   └── cgroups.sh.j2\n", " │   ├── client\n", " │   │   ├── defaults\n", " │   │   │   └── main.yml\n", " │   │   ├── meta\n", " │   │   │   └── main.yml\n", " │   │   └── tasks\n", " │   │   ├── install.yml\n", " │   │   └── main.yml\n", " │   ├── collect\n", " │   │   ├── defaults\n", " │   │   │   └── main.yml\n", " │   │   ├── handlers\n", " │   │   │   └── main.yml\n", " │   │   ├── meta\n", " │   │   │   └── main.yml\n", " │   │   ├── README.md\n", " │   │   ├── tasks\n", " │   │   │   └── main.yml\n", " │   │   └── vars\n", " │   │   └── main.yml\n", " │   ├── datanode_server_deletedata\n", " │   │   └── tasks\n", " │   │   ├── delete.yml\n", " │   │   └── main.yml\n", " │   ├── f500.dumpall\n", " │   │   ├── COPYING\n", " │   │   ├── COPYING.LESSER\n", " │   │   ├── defaults\n", " │   │   │   └── main.yml\n", " │   │   ├── meta\n", " │   │   │   └── main.yml\n", " │   │   ├── README.md\n", " │   │   ├── tasks\n", " │   │   │   └── main.yml\n", " │   │   └── templates\n", " │   │   └── dumpall.j2\n", " │   ├── hbase_master\n", " │   │   ├── defaults\n", " │   │   │   └── main.yml\n", " │   │   ├── meta\n", " │   │   │   └── main.yml\n", " │   │   ├── tasks\n", " │   │   │   ├── config.yml\n", " │   │   │   ├── install.yml\n", " │   │   │   ├── main.yml\n", " │   │   │   └── principal.yml\n", " │   │   └── templates\n", " │   │   ├── hadoop-metrics2-hbase.properties.j2\n", " │   │   ├── hbase-env.sh.j2\n", " │   │   ├── hbase-master.j2\n", " │   │   ├── hbase-policy.xml.j2\n", " │   │   ├── hbase-service-test.rb.j2\n", " │   │   ├── hbase-site.xml.j2\n", " │   │   ├── log4j.properties.j2\n", " │   │   ├── regionservers.j2\n", " │   │   └── zk-jaas.conf.j2\n", " │   ├── hbase_regionserver\n", " │   │   ├── defaults\n", " │   │   │   └── main.yml\n", " │   │   ├── files\n", " │   │   │   └── graceful_stop.sh\n", " │   │   ├── meta\n", " │   │   │   └── main.yml\n", " │   │   ├── tasks\n", " │   │   │   ├── config.yml\n", " │   │   │   ├── install.yml\n", " │   │   │   ├── main.yml\n", " │   │   │   └── principal.yml\n", " │   │   └── templates\n", " │   │   ├── hadoop-metrics2-hbase.properties.j2\n", " │   │   ├── hbase-env.sh.j2\n", " │   │   ├── hbase-policy.xml.j2\n", " │   │   ├── hbase-regionserver.j2\n", " │   │   ├── hbase-site.xml.j2\n", " │   │   ├── log4j.properties.j2\n", " │   │   ├── regionservers.j2\n", " │   │   └── zk-jaas.conf.j2\n", " │   ├── hcatalog\n", " │   │   ├── defaults\n", " │   │   │   └── main.yml\n", " │   │   ├── meta\n", " │   │   │   └── main.yml\n", " │   │   ├── tasks\n", " │   │   │   ├── config.yml\n", " │   │   │   ├── install.yml\n", 
" │   │   │   └── main.yml\n", " │   │   └── templates\n", " │   │   └── hcat-env.sh.j2\n", " │   ├── hdfs_base\n", " │   │   ├── defaults\n", " │   │   │   └── main.yml\n", " │   │   ├── meta\n", " │   │   │   └── main.yml\n", " │   │   └── tasks\n", " │   │   ├── config.yml\n", " │   │   └── main.yml\n", " │   ├── hdfs_spark\n", " │   │   ├── defaults\n", " │   │   │   └── main.yml\n", " │   │   ├── meta\n", " │   │   │   └── main.yml\n", " │   │   └── tasks\n", " │   │   ├── config.yml\n", " │   │   └── main.yml\n", " │   ├── hdfs_tez\n", " │   │   ├── defaults\n", " │   │   │   └── main.yml\n", " │   │   ├── meta\n", " │   │   │   └── main.yml\n", " │   │   └── tasks\n", " │   │   ├── config.yml\n", " │   │   └── main.yml\n", " │   ├── hdfs_yarn\n", " │   │   ├── defaults\n", " │   │   │   └── main.yml\n", " │   │   ├── meta\n", " │   │   │   └── main.yml\n", " │   │   └── tasks\n", " │   │   ├── config.yml\n", " │   │   └── main.yml\n", " │   ├── hive\n", " │   │   ├── defaults\n", " │   │   │   └── main.yml\n", " │   │   ├── meta\n", " │   │   │   └── main.yml\n", " │   │   ├── tasks\n", " │   │   │   ├── config.yml\n", " │   │   │   ├── install.yml\n", " │   │   │   ├── main.yml\n", " │   │   │   └── principal.yml\n", " │   │   └── templates\n", " │   │   ├── hive-exec-log4j.properties.j2\n", " │   │   ├── hive-log4j.properties.j2\n", " │   │   └── hive-site.xml.j2\n", " │   ├── httpfs\n", " │   │   ├── defaults\n", " │   │   │   └── main.yml\n", " │   │   ├── meta\n", " │   │   │   └── main.yml\n", " │   │   ├── tasks\n", " │   │   │   ├── config.yml\n", " │   │   │   ├── install.yml\n", " │   │   │   └── main.yml\n", " │   │   └── templates\n", " │   │   ├── hadoop-httpfs-default.j2\n", " │   │   ├── hadoop-httpfs.j2\n", " │   │   ├── httpfs-env.sh.j2\n", " │   │   ├── httpfs-log4j.properties.j2\n", " │   │   ├── httpfs.sh.j2\n", " │   │   ├── httpfs-signature.secret.j2\n", " │   │   └── httpfs-site.xml.j2\n", " │   ├── hue\n", " │   │   ├── defaults\n", " │   │   │   └── main.yml\n", " │   │   ├── meta\n", " │   │   │   └── main.yml\n", " │   │   ├── tasks\n", " │   │   │   ├── config.yml\n", " │   │   │   ├── install.yml\n", " │   │   │   └── main.yml\n", " │   │   └── templates\n", " │   │   ├── hue_httpd.conf.j2\n", " │   │   ├── hue.ini.j2\n", " │   │   └── log.conf.j2\n", " │   ├── java7\n", " │   │   ├── defaults\n", " │   │   │   └── main.yml\n", " │   │   ├── files\n", " │   │   │   ├── env_keep_javahome\n", " │   │   │   └── java.sh\n", " │   │   ├── meta\n", " │   │   │   └── main.yml\n", " │   │   └── tasks\n", " │   │   ├── config.yml\n", " │   │   ├── install.yml\n", " │   │   └── main.yml\n", " │   ├── java8\n", " │   │   ├── defaults\n", " │   │   │   └── main.yml\n", " │   │   ├── meta\n", " │   │   │   └── main.yml\n", " │   │   └── tasks\n", " │   │   ├── install.yml\n", " │   │   └── main.yml\n", " │   ├── journalnode\n", " │   │   ├── defaults\n", " │   │   │   └── main.yml\n", " │   │   ├── meta\n", " │   │   │   └── main.yml\n", " │   │   ├── tasks\n", " │   │   │   ├── config.yml\n", " │   │   │   ├── install.yml\n", " │   │   │   └── main.yml\n", " │   │   └── templates\n", " │   │   └── default_hadoop-hdfs-journalnode.j2\n", " │   ├── journalnode_server_createdir\n", " │   │   └── tasks\n", " │   │   ├── conf.yml\n", " │   │   └── main.yml\n", " │   ├── journalnode_server_deletedata\n", " │   │   └── tasks\n", " │   │   ├── delete.yml\n", " │   │   └── main.yml\n", " │   ├── mapreduce_history\n", " │   │   ├── defaults\n", " │   │   │   └── main.yml\n", " 
│   │   ├── meta\n", " │   │   │   └── main.yml\n", " │   │   ├── tasks\n", " │   │   │   ├── config.yml\n", " │   │   │   ├── install.yml\n", " │   │   │   └── main.yml\n", " │   │   └── templates\n", " │   │   └── default_hadoop-mapreduce-historyserver.j2\n", " │   ├── namenode\n", " │   │   ├── defaults\n", " │   │   │   └── main.yml\n", " │   │   ├── meta\n", " │   │   │   └── main.yml\n", " │   │   ├── tasks\n", " │   │   │   ├── config.yml\n", " │   │   │   ├── install.yml\n", " │   │   │   └── main.yml\n", " │   │   └── templates\n", " │   │   ├── default_hadoop-hdfs-namenode.j2\n", " │   │   ├── default_hadoop-hdfs-zkfc.j2\n", " │   │   ├── hdfs-balancer.sh.j2\n", " │   │   └── jaas-hdfs.conf.j2\n", " │   ├── namenode_bootstrapstandby\n", " │   │   ├── defaults\n", " │   │   │   └── main.yml\n", " │   │   ├── meta\n", " │   │   │   └── main.yml\n", " │   │   └── tasks\n", " │   │   ├── config.yml\n", " │   │   └── main.yml\n", " │   ├── namenode_format\n", " │   │   ├── defaults\n", " │   │   │   └── main.yml\n", " │   │   ├── meta\n", " │   │   │   └── main.yml\n", " │   │   └── tasks\n", " │   │   ├── config.yml\n", " │   │   └── main.yml\n", " │   ├── os\n", " │   │   ├── defaults\n", " │   │   │   └── main.yml\n", " │   │   ├── meta\n", " │   │   │   └── main.yml\n", " │   │   └── tasks\n", " │   │   ├── kernel.yml\n", " │   │   ├── limits.yml\n", " │   │   ├── main.yml\n", " │   │   └── thp.yml\n", " │   ├── pig\n", " │   │   ├── defaults\n", " │   │   │   └── main.yml\n", " │   │   ├── meta\n", " │   │   │   └── main.yml\n", " │   │   ├── tasks\n", " │   │   │   ├── config.yml\n", " │   │   │   ├── install.yml\n", " │   │   │   └── main.yml\n", " │   │   └── templates\n", " │   │   ├── log4j.properties.j2\n", " │   │   └── pig.properties.j2\n", " │   ├── presto_client\n", " │   │   ├── defaults\n", " │   │   │   └── main.yml\n", " │   │   ├── meta\n", " │   │   │   └── main.yml\n", " │   │   ├── tasks\n", " │   │   │   ├── install.yml\n", " │   │   │   └── main.yml\n", " │   │   └── templates\n", " │   │   ├── config.properties.j2\n", " │   │   ├── hive.properties.j2\n", " │   │   ├── jvm.config.j2\n", " │   │   ├── launcher.j2\n", " │   │   ├── log.properties.j2\n", " │   │   └── node.properties.j2\n", " │   ├── presto_coordinator\n", " │   │   ├── defaults\n", " │   │   │   └── main.yml\n", " │   │   ├── files\n", " │   │   │   └── env_keep_prestohome\n", " │   │   ├── meta\n", " │   │   │   └── main.yml\n", " │   │   ├── tasks\n", " │   │   │   ├── catalog.yml\n", " │   │   │   ├── config.yml\n", " │   │   │   ├── install.yml\n", " │   │   │   └── main.yml\n", " │   │   └── templates\n", " │   │   ├── config.properties.j2\n", " │   │   ├── hive.properties.j2\n", " │   │   ├── jvm.config.j2\n", " │   │   ├── launcher.j2\n", " │   │   ├── log.properties.j2\n", " │   │   ├── node.properties.j2\n", " │   │   └── presto.sh.j2\n", " │   ├── prestogres\n", " │   ├── presto_user\n", " │   │   ├── defaults\n", " │   │   │   └── main.yml\n", " │   │   ├── meta\n", " │   │   │   └── main.yml\n", " │   │   └── tasks\n", " │   │   ├── main.yml\n", " │   │   └── user.yml\n", " │   ├── presto_worker\n", " │   │   ├── defaults\n", " │   │   │   └── main.yml\n", " │   │   ├── files\n", " │   │   │   └── env_keep_prestohome\n", " │   │   ├── meta\n", " │   │   │   └── main.yml\n", " │   │   ├── tasks\n", " │   │   │   ├── catalog.yml\n", " │   │   │   ├── config.yml\n", " │   │   │   ├── install.yml\n", " │   │   │   └── main.yml\n", " │   │   └── templates\n", " │   │   ├── 
config.properties.j2\n", " │   │   ├── hive.properties.j2\n", " │   │   ├── jvm.config.j2\n", " │   │   ├── launcher.j2\n", " │   │   ├── log.properties.j2\n", " │   │   ├── node.properties.j2\n", " │   │   └── presto.sh.j2\n", " │   ├── resourcemanager\n", " │   │   ├── defaults\n", " │   │   │   └── main.yml\n", " │   │   ├── meta\n", " │   │   │   └── main.yml\n", " │   │   ├── tasks\n", " │   │   │   ├── config.yml\n", " │   │   │   ├── install.yml\n", " │   │   │   └── main.yml\n", " │   │   └── templates\n", " │   │   └── default_hadoop-yarn-resourcemanager.j2\n", " │   ├── site-defaults\n", " │   │   └── defaults\n", " │   │   └── main.yml\n", " │   ├── slavenode\n", " │   │   ├── defaults\n", " │   │   │   └── main.yml\n", " │   │   ├── meta\n", " │   │   │   └── main.yml\n", " │   │   ├── tasks\n", " │   │   │   ├── config.yml\n", " │   │   │   ├── install.yml\n", " │   │   │   └── main.yml\n", " │   │   └── templates\n", " │   │   ├── default_hadoop-hdfs-datanode.j2\n", " │   │   └── default_hadoop-yarn-nodemanager.j2\n", " │   ├── spark\n", " │   │   ├── defaults\n", " │   │   │   └── main.yml\n", " │   │   ├── files\n", " │   │   │   └── env_keep_sparkhome\n", " │   │   ├── meta\n", " │   │   │   └── main.yml\n", " │   │   ├── tasks\n", " │   │   │   ├── config.yml\n", " │   │   │   ├── install-tarball.yml\n", " │   │   │   ├── install.yml\n", " │   │   │   ├── main.yml\n", " │   │   │   └── principal.yml\n", " │   │   └── templates\n", " │   │   ├── fairscheduler.xml.j2\n", " │   │   ├── log4j.properties.j2\n", " │   │   ├── metrics.properties.j2\n", " │   │   ├── spark-defaults.conf.j2\n", " │   │   ├── spark-env.sh.j2\n", " │   │   └── spark.sh.j2\n", " │   ├── spark_history\n", " │   │   ├── defaults\n", " │   │   │   └── main.yml\n", " │   │   ├── meta\n", " │   │   │   └── main.yml\n", " │   │   └── tasks\n", " │   │   ├── config.yml\n", " │   │   └── main.yml\n", " │   ├── spark_user\n", " │   │   ├── defaults\n", " │   │   │   └── main.yml\n", " │   │   ├── meta\n", " │   │   │   └── main.yml\n", " │   │   └── tasks\n", " │   │   ├── main.yml\n", " │   │   └── user.yml\n", " │   ├── storm\n", " │   │   ├── defaults\n", " │   │   │   └── main.yml\n", " │   │   ├── files\n", " │   │   │   ├── storm-drpc\n", " │   │   │   ├── storm-nimbus\n", " │   │   │   ├── storm.py\n", " │   │   │   ├── storm-supervisor\n", " │   │   │   └── storm-ui\n", " │   │   ├── meta\n", " │   │   │   └── main.yml\n", " │   │   ├── tasks\n", " │   │   │   ├── config.yml\n", " │   │   │   ├── install.yml\n", " │   │   │   ├── main.yml\n", " │   │   │   └── user.yml\n", " │   │   └── templates\n", " │   │   ├── storm_env.ini.j2\n", " │   │   ├── storm-env.sh.j2\n", " │   │   ├── storm-slider-env.sh.j2\n", " │   │   └── storm.yaml.j2\n", " │   ├── tez\n", " │   │   ├── defaults\n", " │   │   │   └── main.yml\n", " │   │   ├── meta\n", " │   │   │   └── main.yml\n", " │   │   ├── tasks\n", " │   │   │   ├── config.yml\n", " │   │   │   ├── install.yml\n", " │   │   │   └── main.yml\n", " │   │   └── templates\n", " │   │   └── tez-site.xml.j2\n", " │   ├── timelineservice\n", " │   │   ├── defaults\n", " │   │   │   └── main.yml\n", " │   │   ├── meta\n", " │   │   │   └── main.yml\n", " │   │   ├── tasks\n", " │   │   │   ├── config.yml\n", " │   │   │   ├── install.yml\n", " │   │   │   └── main.yml\n", " │   │   └── templates\n", " │   │   └── default_hadoop-yarn-timelineserver.j2\n", " │   ├── zookeeper_server\n", " │   │   ├── defaults\n", " │   │   │   └── main.yml\n", " │   │   ├── meta\n", " 
│   │   │   └── main.yml\n", " │   │   ├── tasks\n", " │   │   │   ├── config.yml\n", " │   │   │   ├── install.yml\n", " │   │   │   ├── main.yml\n", " │   │   │   └── principal.yml\n", " │   │   └── templates\n", " │   │   ├── jaas.conf.j2\n", " │   │   ├── log4j.properties.j2\n", " │   │   ├── myid.j2\n", " │   │   ├── zoo.cfg.j2\n", " │   │   ├── zookeeper-env.sh.j2\n", " │   │   └── zookeeper-server.j2\n", " │   └── zookeeper_server_deletedata\n", " │   └── tasks\n", " │   ├── delete.yml\n", " │   └── main.yml\n", " ├── start_datanode.yml\n", " ├── start_hbase_master.yml\n", " ├── start_hbase_regionserver.yml\n", " ├── start_hcatalog.yml\n", " ├── start_httpfs.yml\n", " ├── start_hue.yml\n", " ├── start_journalnode.yml\n", " ├── start_mapreduce_historyserver.yml\n", " ├── start_namenode.retry\n", " ├── start_namenode.yml\n", " ├── start_nodemanager.yml\n", " ├── start_resourcemanager.yml\n", " ├── start_spark_historyserver.yml\n", " ├── start_timelineservice.yml\n", " ├── start_zookeeper-server.yml\n", " ├── stop_datanode.yml\n", " ├── stop_hbase_master.yml\n", " ├── stop_hbase_regionserver.yml\n", " ├── stop_hcatalog.yml\n", " ├── stop_journalnode.yml\n", " ├── stop_mapreduce_historyserver.yml\n", " ├── stop_namenode.yml\n", " ├── stop_nodemanager.yml\n", " ├── stop_resourcemanager.yml\n", " ├── stop_spark_historyserver.yml\n", " ├── stop_timelineservice.yml\n", " ├── stop_zookeeper-server.yml\n", " ├── sync_kdc.yml\n", " └── upgrade_namenode.yml\n", "\n", "194 directories, 404 files\n" ] } ], "source": [ "!rm -fr {work_dir}/hadoop\n", "!git clone https://github.com/NII-cloud-operation/Literate-computing-Hadoop.git {work_dir}/hadoop\n", "!tree {work_dir}/hadoop" ] }, { "cell_type": "code", "execution_count": 51, "metadata": { "lc_cell_meme": { "current": "a8eb16cc-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eb17e4-495a-11e8-ba2b-0242ac130002", "previous": "a8eb15b4-495a-11e8-ba2b-0242ac130002" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "total 244\r\n", "drwxr-xr-x 4 root root 4096 Sep 2 19:20 .\r\n", "drwxr-xr-x 4 root root 4096 Sep 2 19:20 ..\r\n", "-rw-r--r-- 1 root root 13 Sep 2 19:20 conf_base.retry\r\n", "-rw-r--r-- 1 root root 39 Sep 2 19:20 conf_base.yml\r\n", "-rw-r--r-- 1 root root 136 Sep 2 19:20 conf_hdfs_base.yml\r\n", "-rw-r--r-- 1 root root 137 Sep 2 19:20 conf_hdfs_spark.yml\r\n", "-rw-r--r-- 1 root root 135 Sep 2 19:20 conf_hdfs_tez.yml\r\n", "-rw-r--r-- 1 root root 136 Sep 2 19:20 conf_hdfs_yarn.yml\r\n", "-rw-r--r-- 1 root root 188 Sep 2 19:20 conf_namenode_bootstrapstandby.yml\r\n" ] } ], "source": [ "playbook_dir = os.path.join(work_dir, 'hadoop/playbooks')\n", "!ls -la {playbook_dir} | head" ] }, { "cell_type": "code", "execution_count": 52, "metadata": { "ExecuteTime": { "end_time": "2016-05-17T07:06:09.354640", "start_time": "2016-05-17T07:05:27.933437" }, "lc_cell_meme": { "current": "a8eb17e4-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eb18fc-495a-11e8-ba2b-0242ac130002", "previous": "a8eb16cc-495a-11e8-ba2b-0242ac130002" }, "scrolled": true }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Using /etc/ansible/ansible.cfg as config file\n", "\n", "PLAY [hadoop_slavenode] ********************************************************\n", "\n", "TASK [setup] *******************************************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [base : include] **********************************************************\n", "\u001b[0;36mincluded: 
/tmp/tmpP0U4b9/hadoop/playbooks/roles/base/tasks/repo.yml for XXX.XXX.XXX.233\u001b[0m\n", "\n", "TASK [base : install_hdp_repo] *************************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => {\"changed\": false, \"gid\": 0, \"group\": \"root\", \"mode\": \"0644\", \"owner\": \"root\", \"path\": \"/etc/yum.repos.d/hdp.repo\", \"size\": 556, \"state\": \"file\", \"uid\": 0}\u001b[0m\n", "\n", "TASK [base : include] **********************************************************\n", "\u001b[0;36mincluded: /tmp/tmpP0U4b9/hadoop/playbooks/roles/base/tasks/conf.yml for XXX.XXX.XXX.233\u001b[0m\n", "\n", "TASK [base : create_hadoop_conf_dir] *******************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => {\"changed\": false, \"gid\": 0, \"group\": \"root\", \"mode\": \"0755\", \"owner\": \"root\", \"path\": \"/etc/hadoop/conf\", \"size\": 4096, \"state\": \"directory\", \"uid\": 0}\u001b[0m\n", "\n", "TASK [base : copy_conf_files] **************************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item=core-site.xml) => {\"changed\": false, \"gid\": 0, \"group\": \"root\", \"item\": \"core-site.xml\", \"mode\": \"0644\", \"owner\": \"root\", \"path\": \"/etc/hadoop/conf/core-site.xml\", \"size\": 2319, \"state\": \"file\", \"uid\": 0}\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item=hdfs-site.xml) => {\"changed\": false, \"gid\": 0, \"group\": \"root\", \"item\": \"hdfs-site.xml\", \"mode\": \"0644\", \"owner\": \"root\", \"path\": \"/etc/hadoop/conf/hdfs-site.xml\", \"size\": 5989, \"state\": \"file\", \"uid\": 0}\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item=yarn-site.xml) => {\"changed\": false, \"gid\": 0, \"group\": \"root\", \"item\": \"yarn-site.xml\", \"mode\": \"0644\", \"owner\": \"root\", \"path\": \"/etc/hadoop/conf/yarn-site.xml\", \"size\": 6653, \"state\": \"file\", \"uid\": 0}\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item=mapred-site.xml) => {\"changed\": false, \"gid\": 0, \"group\": \"root\", \"item\": \"mapred-site.xml\", \"mode\": \"0644\", \"owner\": \"root\", \"path\": \"/etc/hadoop/conf/mapred-site.xml\", \"size\": 2287, \"state\": \"file\", \"uid\": 0}\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item=hadoop-env.sh) => {\"changed\": false, \"gid\": 0, \"group\": \"root\", \"item\": \"hadoop-env.sh\", \"mode\": \"0644\", \"owner\": \"root\", \"path\": \"/etc/hadoop/conf/hadoop-env.sh\", \"size\": 4623, \"state\": \"file\", \"uid\": 0}\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item=yarn-env.sh) => {\"changed\": false, \"gid\": 0, \"group\": \"root\", \"item\": \"yarn-env.sh\", \"mode\": \"0644\", \"owner\": \"root\", \"path\": \"/etc/hadoop/conf/yarn-env.sh\", \"size\": 4567, \"state\": \"file\", \"uid\": 0}\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item=mapred-env.sh) => {\"changed\": false, \"gid\": 0, \"group\": \"root\", \"item\": \"mapred-env.sh\", \"mode\": \"0644\", \"owner\": \"root\", \"path\": \"/etc/hadoop/conf/mapred-env.sh\", \"size\": 1639, \"state\": \"file\", \"uid\": 0}\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item=hadoop-metrics.properties) => {\"changed\": false, \"gid\": 0, \"group\": \"root\", \"item\": \"hadoop-metrics.properties\", \"mode\": \"0644\", \"owner\": \"root\", \"path\": \"/etc/hadoop/conf/hadoop-metrics.properties\", \"size\": 2490, \"state\": \"file\", \"uid\": 0}\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item=hadoop-metrics2.properties) => {\"changed\": false, \"gid\": 0, \"group\": 
\"root\", \"item\": \"hadoop-metrics2.properties\", \"mode\": \"0644\", \"owner\": \"root\", \"path\": \"/etc/hadoop/conf/hadoop-metrics2.properties\", \"size\": 2425, \"state\": \"file\", \"uid\": 0}\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item=log4j.properties) => {\"changed\": false, \"gid\": 0, \"group\": \"root\", \"item\": \"log4j.properties\", \"mode\": \"0644\", \"owner\": \"root\", \"path\": \"/etc/hadoop/conf/log4j.properties\", \"size\": 11291, \"state\": \"file\", \"uid\": 0}\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item=capacity-scheduler.xml) => {\"changed\": false, \"gid\": 0, \"group\": \"root\", \"item\": \"capacity-scheduler.xml\", \"mode\": \"0644\", \"owner\": \"root\", \"path\": \"/etc/hadoop/conf/capacity-scheduler.xml\", \"size\": 4436, \"state\": \"file\", \"uid\": 0}\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item=hosts.exclude) => {\"changed\": false, \"gid\": 0, \"group\": \"root\", \"item\": \"hosts.exclude\", \"mode\": \"0644\", \"owner\": \"root\", \"path\": \"/etc/hadoop/conf/hosts.exclude\", \"size\": 2, \"state\": \"file\", \"uid\": 0}\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item=hosts.list) => {\"changed\": false, \"gid\": 0, \"group\": \"root\", \"item\": \"hosts.list\", \"mode\": \"0644\", \"owner\": \"root\", \"path\": \"/etc/hadoop/conf/hosts.list\", \"size\": 99, \"state\": \"file\", \"uid\": 0}\u001b[0m\n", "\n", "TASK [base : copy_secure_conf_files] *******************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item=ssl-server.xml) => {\"changed\": false, \"gid\": 0, \"group\": \"root\", \"item\": \"ssl-server.xml\", \"mode\": \"0644\", \"owner\": \"root\", \"path\": \"/etc/hadoop/conf/ssl-server.xml\", \"size\": 672, \"state\": \"file\", \"uid\": 0}\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item=ssl-client.xml) => {\"changed\": false, \"gid\": 0, \"group\": \"root\", \"item\": \"ssl-client.xml\", \"mode\": \"0644\", \"owner\": \"root\", \"path\": \"/etc/hadoop/conf/ssl-client.xml\", \"size\": 344, \"state\": \"file\", \"uid\": 0}\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item=zk-acl.txt) => {\"changed\": false, \"gid\": 0, \"group\": \"root\", \"item\": \"zk-acl.txt\", \"mode\": \"0644\", \"owner\": \"root\", \"path\": \"/etc/hadoop/conf/zk-acl.txt\", \"size\": 15, \"state\": \"file\", \"uid\": 0}\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item=container-executor.cfg) => {\"changed\": false, \"gid\": 0, \"group\": \"root\", \"item\": \"container-executor.cfg\", \"mode\": \"0644\", \"owner\": \"root\", \"path\": \"/etc/hadoop/conf/container-executor.cfg\", \"size\": 225, \"state\": \"file\", \"uid\": 0}\u001b[0m\n", "\n", "TASK [base : include] **********************************************************\n", "\u001b[0;36mincluded: /tmp/tmpP0U4b9/hadoop/playbooks/roles/base/tasks/kerberos.yml for XXX.XXX.XXX.233\u001b[0m\n", "\n", "TASK [base : include] **********************************************************\n", "\u001b[0;36mincluded: /tmp/tmpP0U4b9/hadoop/playbooks/roles/base/tasks/principal.yml for XXX.XXX.XXX.233\u001b[0m\n", "\n", "TASK [base : Check principal] **************************************************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233] => {\"changed\": false, \"msg\": \"remote module does not support check mode\", \"skipped\": true}\u001b[0m\n", "\n", "TASK [base : Add principal] ****************************************************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233] => {\"changed\": false, \"skip_reason\": \"Conditional 
check failed\", \"skipped\": true}\u001b[0m\n", "\n", "TASK [base : Check keytab] *****************************************************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233] => {\"changed\": false, \"msg\": \"remote module does not support check mode\", \"skipped\": true}\u001b[0m\n", "\n", "TASK [base : Prepare keytab] ***************************************************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233] => {\"changed\": false, \"skip_reason\": \"Conditional check failed\", \"skipped\": true}\u001b[0m\n", "\n", "TASK [base : Modify permissions of keytab] *************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => {\"changed\": false, \"gid\": 494, \"group\": \"hadoop\", \"mode\": \"0400\", \"owner\": \"hdfs\", \"path\": \"/etc/hadoop/conf/hdfs-unmerged.keytab\", \"size\": 394, \"state\": \"file\", \"uid\": 492}\u001b[0m\n", "\n", "TASK [base : include] **********************************************************\n", "\u001b[0;36mincluded: /tmp/tmpP0U4b9/hadoop/playbooks/roles/base/tasks/principal.yml for XXX.XXX.XXX.233\u001b[0m\n", "\n", "TASK [base : Check principal] **************************************************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233] => {\"changed\": false, \"msg\": \"remote module does not support check mode\", \"skipped\": true}\u001b[0m\n", "\n", "TASK [base : Add principal] ****************************************************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233] => {\"changed\": false, \"skip_reason\": \"Conditional check failed\", \"skipped\": true}\u001b[0m\n", "\n", "TASK [base : Check keytab] *****************************************************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233] => {\"changed\": false, \"msg\": \"remote module does not support check mode\", \"skipped\": true}\u001b[0m\n", "\n", "TASK [base : Prepare keytab] ***************************************************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233] => {\"changed\": false, \"skip_reason\": \"Conditional check failed\", \"skipped\": true}\u001b[0m\n", "\n", "TASK [base : Modify permissions of keytab] *************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => {\"changed\": false, \"gid\": 494, \"group\": \"hadoop\", \"mode\": \"0400\", \"owner\": \"mapred\", \"path\": \"/etc/hadoop/conf/mapred-unmerged.keytab\", \"size\": 404, \"state\": \"file\", \"uid\": 493}\u001b[0m\n", "\n", "TASK [base : include] **********************************************************\n", "\u001b[0;36mincluded: /tmp/tmpP0U4b9/hadoop/playbooks/roles/base/tasks/principal.yml for XXX.XXX.XXX.233\u001b[0m\n", "\n", "TASK [base : Check principal] **************************************************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233] => {\"changed\": false, \"msg\": \"remote module does not support check mode\", \"skipped\": true}\u001b[0m\n", "\n", "TASK [base : Add principal] ****************************************************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233] => {\"changed\": false, \"skip_reason\": \"Conditional check failed\", \"skipped\": true}\u001b[0m\n", "\n", "TASK [base : Check keytab] *****************************************************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233] => {\"changed\": false, \"msg\": \"remote module does not support check mode\", \"skipped\": true}\u001b[0m\n", "\n", "TASK [base : Prepare keytab] ***************************************************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233] => {\"changed\": false, \"skip_reason\": \"Conditional check failed\", \"skipped\": 
true}\u001b[0m\n", "\n", "TASK [base : Modify permissions of keytab] *************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => {\"changed\": false, \"gid\": 494, \"group\": \"hadoop\", \"mode\": \"0400\", \"owner\": \"yarn\", \"path\": \"/etc/hadoop/conf/yarn-unmerged.keytab\", \"size\": 394, \"state\": \"file\", \"uid\": 494}\u001b[0m\n", "\n", "TASK [base : include] **********************************************************\n", "\u001b[0;36mincluded: /tmp/tmpP0U4b9/hadoop/playbooks/roles/base/tasks/principal.yml for XXX.XXX.XXX.233\u001b[0m\n", "\n", "TASK [base : Check principal] **************************************************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233] => {\"changed\": false, \"msg\": \"remote module does not support check mode\", \"skipped\": true}\u001b[0m\n", "\n", "TASK [base : Add principal] ****************************************************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233] => {\"changed\": false, \"skip_reason\": \"Conditional check failed\", \"skipped\": true}\u001b[0m\n", "\n", "TASK [base : Check keytab] *****************************************************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233] => {\"changed\": false, \"msg\": \"remote module does not support check mode\", \"skipped\": true}\u001b[0m\n", "\n", "TASK [base : Prepare keytab] ***************************************************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233] => {\"changed\": false, \"skip_reason\": \"Conditional check failed\", \"skipped\": true}\u001b[0m\n", "\n", "TASK [base : Modify permissions of keytab] *************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => {\"changed\": false, \"gid\": 494, \"group\": \"hadoop\", \"mode\": \"0400\", \"owner\": \"hdfs\", \"path\": \"/etc/hadoop/conf/http.keytab\", \"size\": 394, \"state\": \"file\", \"uid\": 492}\u001b[0m\n", "\n", "TASK [base : include] **********************************************************\n", "\u001b[0;36mincluded: /tmp/tmpP0U4b9/hadoop/playbooks/roles/base/tasks/keytab.yml for XXX.XXX.XXX.233\u001b[0m\n", "\n", "TASK [base : check keytab] *****************************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => {\"changed\": false, \"stat\": {\"atime\": 1470633429.3369722, \"checksum\": \"48b45130e5b2f4dfd1c6b307a593708e637f4252\", \"ctime\": 1453095828.7535946, \"dev\": 2050, \"exists\": true, \"gid\": 494, \"gr_name\": \"hadoop\", \"inode\": 148503, \"isblk\": false, \"ischr\": false, \"isdir\": false, \"isfifo\": false, \"isgid\": false, \"islnk\": false, \"isreg\": true, \"issock\": false, \"isuid\": false, \"md5\": \"15bc4a0eb0e7e967dda8e346ab30a307\", \"mode\": \"0400\", \"mtime\": 1453095827.5435684, \"nlink\": 1, \"path\": \"/etc/hadoop/conf/hdfs.keytab\", \"pw_name\": \"hdfs\", \"rgrp\": false, \"roth\": false, \"rusr\": true, \"size\": 786, \"uid\": 492, \"wgrp\": false, \"woth\": false, \"wusr\": false, \"xgrp\": false, \"xoth\": false, \"xusr\": false}}\u001b[0m\n", "\n", "TASK [base : prepare_script] ***************************************************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233] => {\"changed\": false, \"skip_reason\": \"Conditional check failed\", \"skipped\": true}\u001b[0m\n", "\n", "TASK [base : run_script] *******************************************************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233] => {\"changed\": false, \"skip_reason\": \"Conditional check failed\", \"skipped\": true}\u001b[0m\n", "\n", "TASK [base : Modify permissions of keytab] 
*************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => {\"changed\": false, \"gid\": 494, \"group\": \"hadoop\", \"mode\": \"0400\", \"owner\": \"hdfs\", \"path\": \"/etc/hadoop/conf/hdfs.keytab\", \"size\": 786, \"state\": \"file\", \"uid\": 492}\u001b[0m\n", "\n", "TASK [base : include] **********************************************************\n", "\u001b[0;36mincluded: /tmp/tmpP0U4b9/hadoop/playbooks/roles/base/tasks/keytab.yml for XXX.XXX.XXX.233\u001b[0m\n", "\n", "TASK [base : check keytab] *****************************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => {\"changed\": false, \"stat\": {\"atime\": 1470633436.1891208, \"checksum\": \"d58ef68c5f9eb660eb1b325fce8a7102b132498f\", \"ctime\": 1453095834.5307198, \"dev\": 2050, \"exists\": true, \"gid\": 494, \"gr_name\": \"hadoop\", \"inode\": 148504, \"isblk\": false, \"ischr\": false, \"isdir\": false, \"isfifo\": false, \"isgid\": false, \"islnk\": false, \"isreg\": true, \"issock\": false, \"isuid\": false, \"md5\": \"0698e0628a38db48b404c7952337946e\", \"mode\": \"0400\", \"mtime\": 1453095833.2656922, \"nlink\": 1, \"path\": \"/etc/hadoop/conf/mapred.keytab\", \"pw_name\": \"mapred\", \"rgrp\": false, \"roth\": false, \"rusr\": true, \"size\": 796, \"uid\": 493, \"wgrp\": false, \"woth\": false, \"wusr\": false, \"xgrp\": false, \"xoth\": false, \"xusr\": false}}\u001b[0m\n", "\n", "TASK [base : prepare_script] ***************************************************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233] => {\"changed\": false, \"skip_reason\": \"Conditional check failed\", \"skipped\": true}\u001b[0m\n", "\n", "TASK [base : run_script] *******************************************************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233] => {\"changed\": false, \"skip_reason\": \"Conditional check failed\", \"skipped\": true}\u001b[0m\n", "\n", "TASK [base : Modify permissions of keytab] *************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => {\"changed\": false, \"gid\": 494, \"group\": \"hadoop\", \"mode\": \"0400\", \"owner\": \"mapred\", \"path\": \"/etc/hadoop/conf/mapred.keytab\", \"size\": 796, \"state\": \"file\", \"uid\": 493}\u001b[0m\n", "\n", "TASK [base : include] **********************************************************\n", "\u001b[0;36mincluded: /tmp/tmpP0U4b9/hadoop/playbooks/roles/base/tasks/keytab.yml for XXX.XXX.XXX.233\u001b[0m\n", "\n", "TASK [base : check keytab] *****************************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => {\"changed\": false, \"stat\": {\"atime\": 1470633443.2042727, \"checksum\": \"cea51b497249c0ed850ce31660b077517e418ad5\", \"ctime\": 1453095840.1158407, \"dev\": 2050, \"exists\": true, \"gid\": 494, \"gr_name\": \"hadoop\", \"inode\": 148505, \"isblk\": false, \"ischr\": false, \"isdir\": false, \"isfifo\": false, \"isgid\": false, \"islnk\": false, \"isreg\": true, \"issock\": false, \"isuid\": false, \"md5\": \"6edf62a839dcafb95966b9498ef69f79\", \"mode\": \"0400\", \"mtime\": 1453095838.8728137, \"nlink\": 1, \"path\": \"/etc/hadoop/conf/yarn.keytab\", \"pw_name\": \"yarn\", \"rgrp\": false, \"roth\": false, \"rusr\": true, \"size\": 786, \"uid\": 494, \"wgrp\": false, \"woth\": false, \"wusr\": false, \"xgrp\": false, \"xoth\": false, \"xusr\": false}}\u001b[0m\n", "\n", "TASK [base : prepare_script] ***************************************************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233] => {\"changed\": false, \"skip_reason\": \"Conditional check failed\", 
\"skipped\": true}\u001b[0m\n", "\n", "TASK [base : run_script] *******************************************************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233] => {\"changed\": false, \"skip_reason\": \"Conditional check failed\", \"skipped\": true}\u001b[0m\n", "\n", "TASK [base : Modify permissions of keytab] *************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => {\"changed\": false, \"gid\": 494, \"group\": \"hadoop\", \"mode\": \"0400\", \"owner\": \"yarn\", \"path\": \"/etc/hadoop/conf/yarn.keytab\", \"size\": 786, \"state\": \"file\", \"uid\": 494}\u001b[0m\n", "\n", "TASK [java7 : include] *********************************************************\n", "\u001b[0;36mincluded: /tmp/tmpP0U4b9/hadoop/playbooks/roles/java7/tasks/install.yml for XXX.XXX.XXX.233\u001b[0m\n", "\n", "TASK [java7 : check_jdk7_installed] ********************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => {\"changed\": false, \"cmd\": \"rpm -qa|grep jdk-1.7.0_75-fcs.x86_64\", \"delta\": \"0:00:02.603238\", \"end\": \"2016-09-02 19:20:45.598545\", \"failed\": false, \"failed_when_result\": false, \"rc\": 0, \"start\": \"2016-09-02 19:20:42.995307\", \"stderr\": \"\", \"stdout\": \"jdk-1.7.0_75-fcs.x86_64\", \"stdout_lines\": [\"jdk-1.7.0_75-fcs.x86_64\"], \"warnings\": [\"Consider using yum, dnf or zypper module rather than running rpm\"]}\u001b[0m\n", "\u001b[1;35m [WARNING]: Consider using yum, dnf or zypper module rather than running rpm\n", "\u001b[0m\n", "\n", "TASK [java7 : download_oraclejdk7_by_wget] *************************************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233] => {\"changed\": false, \"skip_reason\": \"Conditional check failed\", \"skipped\": true}\u001b[0m\n", "\n", "TASK [java7 : md5sum_rpm] ******************************************************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233] => {\"changed\": false, \"skip_reason\": \"Conditional check failed\", \"skipped\": true}\u001b[0m\n", "\n", "TASK [java7 : check_md5sum] ****************************************************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233] => {\"changed\": false, \"skip_reason\": \"Conditional check failed\", \"skipped\": true}\u001b[0m\n", "\n", "TASK [java7 : install_oraclejdk] ***********************************************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233] => {\"changed\": false, \"skip_reason\": \"Conditional check failed\", \"skipped\": true}\u001b[0m\n", "\n", "TASK [java7 : include] *********************************************************\n", "\u001b[0;36mincluded: /tmp/tmpP0U4b9/hadoop/playbooks/roles/java7/tasks/config.yml for XXX.XXX.XXX.233\u001b[0m\n", "\n", "TASK [java7 : copy_bash_profile] ***********************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => {\"changed\": false, \"checksum\": \"b60f0995dba4c2f287340a26aad343ebc97335ea\", \"dest\": \"/etc/profile.d/java.sh\", \"gid\": 0, \"group\": \"root\", \"mode\": \"0644\", \"owner\": \"root\", \"path\": \"/etc/profile.d/java.sh\", \"size\": 72, \"state\": \"file\", \"uid\": 0}\u001b[0m\n", "\n", "TASK [java7 : copy_sudoers_conf_of_JAVA_HOME] **********************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => {\"changed\": false, \"checksum\": \"e8858ea3e690a93d46a8b3f38e4d2da4f7cc8c2a\", \"dest\": \"/etc/sudoers.d/env_keep_javahome\", \"gid\": 0, \"group\": \"root\", \"mode\": \"0440\", \"owner\": \"root\", \"path\": \"/etc/sudoers.d/env_keep_javahome\", \"size\": 35, \"state\": \"file\", \"uid\": 0}\u001b[0m\n", "\n", "TASK [slavenode : 
include] *****************************************************\n", "\u001b[0;36mincluded: /tmp/tmpP0U4b9/hadoop/playbooks/roles/slavenode/tasks/install.yml for XXX.XXX.XXX.233\u001b[0m\n", "\n", "TASK [slavenode : install_slavenode_packages] **********************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item=[u'hadoop-hdfs-datanode', u'hadoop-yarn-nodemanager', u'hadoop-mapreduce']) => {\"changed\": false, \"item\": [\"hadoop-hdfs-datanode\", \"hadoop-yarn-nodemanager\", \"hadoop-mapreduce\"], \"msg\": \"\", \"rc\": 0, \"results\": [\"hadoop-hdfs-datanode-XXX.XXX.XXX.2.3.4.0-3485.el6.noarch providing hadoop-hdfs-datanode is already installed\", \"hadoop-yarn-nodemanager-XXX.XXX.XXX.2.3.4.0-3485.el6.noarch providing hadoop-yarn-nodemanager is already installed\", \"hadoop-mapreduce-XXX.XXX.XXX.2.3.4.0-3485.el6.noarch providing hadoop-mapreduce is already installed\"]}\u001b[0m\n", "\n", "TASK [slavenode : include] *****************************************************\n", "\u001b[0;36mincluded: /tmp/tmpP0U4b9/hadoop/playbooks/roles/slavenode/tasks/config.yml for XXX.XXX.XXX.233\u001b[0m\n", "\n", "TASK [slavenode : create_datanode_data_dir] ************************************\n", "\u001b[0;35m[DEPRECATION WARNING]: Using bare variables is deprecated. Update your playbooks\n", " so that the environment value uses the full variable syntax \n", "('{{dfs_datanode_data_dirs}}').\n", "This feature will be removed in a future \n", "release. Deprecation warnings can be disabled by setting \n", "deprecation_warnings=False in ansible.cfg.\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item=/hadoop/data01/dfs/datadir) => {\"changed\": true, \"item\": \"/hadoop/data01/dfs/datadir\"}\u001b[0m\n", "\u001b[0;31m--- before\n", "\u001b[0m\u001b[0;32m+++ after\n", "\u001b[0m\u001b[0;36m@@ -1,4 +1,4 @@\n", "\u001b[0m {\n", " \"path\": \"/hadoop/data01/dfs/datadir\", \n", "\u001b[0;31m- \"state\": \"absent\"\n", "\u001b[0m\u001b[0;32m+ \"state\": \"directory\"\n", "\u001b[0m }\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item=/hadoop/data02/dfs/datadir) => {\"changed\": true, \"item\": \"/hadoop/data02/dfs/datadir\"}\u001b[0m\n", "\u001b[0;31m--- before\n", "\u001b[0m\u001b[0;32m+++ after\n", "\u001b[0m\u001b[0;36m@@ -1,4 +1,4 @@\n", "\u001b[0m {\n", " \"path\": \"/hadoop/data02/dfs/datadir\", \n", "\u001b[0;31m- \"state\": \"absent\"\n", "\u001b[0m\u001b[0;32m+ \"state\": \"directory\"\n", "\u001b[0m }\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item=/hadoop/data03/dfs/datadir) => {\"changed\": false, \"gid\": 494, \"group\": \"hadoop\", \"item\": \"/hadoop/data03/dfs/datadir\", \"mode\": \"0700\", \"owner\": \"hdfs\", \"path\": \"/hadoop/data03/dfs/datadir\", \"size\": 4096, \"state\": \"directory\", \"uid\": 492}\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item=/hadoop/data04/dfs/datadir) => {\"changed\": true, \"item\": \"/hadoop/data04/dfs/datadir\"}\u001b[0m\n", "\u001b[0;31m--- before\n", "\u001b[0m\u001b[0;32m+++ after\n", "\u001b[0m\u001b[0;36m@@ -1,4 +1,4 @@\n", "\u001b[0m {\n", " \"path\": \"/hadoop/data04/dfs/datadir\", \n", "\u001b[0;31m- \"state\": \"absent\"\n", "\u001b[0m\u001b[0;32m+ \"state\": \"directory\"\n", "\u001b[0m }\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item=/hadoop/data05/dfs/datadir) => {\"changed\": true, \"item\": \"/hadoop/data05/dfs/datadir\"}\u001b[0m\n", "\u001b[0;31m--- before\n", "\u001b[0m\u001b[0;32m+++ after\n", "\u001b[0m\u001b[0;36m@@ -1,4 +1,4 @@\n", "\u001b[0m {\n", " \"path\": \"/hadoop/data05/dfs/datadir\", \n", 
"\u001b[0;31m- \"state\": \"absent\"\n", "\u001b[0m\u001b[0;32m+ \"state\": \"directory\"\n", "\u001b[0m }\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item=/hadoop/data06/dfs/datadir) => {\"changed\": true, \"item\": \"/hadoop/data06/dfs/datadir\"}\u001b[0m\n", "\u001b[0;31m--- before\n", "\u001b[0m\u001b[0;32m+++ after\n", "\u001b[0m\u001b[0;36m@@ -1,4 +1,4 @@\n", "\u001b[0m {\n", " \"path\": \"/hadoop/data06/dfs/datadir\", \n", "\u001b[0;31m- \"state\": \"absent\"\n", "\u001b[0m\u001b[0;32m+ \"state\": \"directory\"\n", "\u001b[0m }\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item=/hadoop/data07/dfs/datadir) => {\"changed\": true, \"item\": \"/hadoop/data07/dfs/datadir\"}\u001b[0m\n", "\u001b[0;31m--- before\n", "\u001b[0m\u001b[0;32m+++ after\n", "\u001b[0m\u001b[0;36m@@ -1,4 +1,4 @@\n", "\u001b[0m {\n", " \"path\": \"/hadoop/data07/dfs/datadir\", \n", "\u001b[0;31m- \"state\": \"absent\"\n", "\u001b[0m\u001b[0;32m+ \"state\": \"directory\"\n", "\u001b[0m }\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item=/hadoop/data08/dfs/datadir) => {\"changed\": true, \"item\": \"/hadoop/data08/dfs/datadir\"}\u001b[0m\n", "\u001b[0;31m--- before\n", "\u001b[0m\u001b[0;32m+++ after\n", "\u001b[0m\u001b[0;36m@@ -1,4 +1,4 @@\n", "\u001b[0m {\n", " \"path\": \"/hadoop/data08/dfs/datadir\", \n", "\u001b[0;31m- \"state\": \"absent\"\n", "\u001b[0m\u001b[0;32m+ \"state\": \"directory\"\n", "\u001b[0m }\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item=/hadoop/data09/dfs/datadir) => {\"changed\": true, \"item\": \"/hadoop/data09/dfs/datadir\"}\u001b[0m\n", "\u001b[0;31m--- before\n", "\u001b[0m\u001b[0;32m+++ after\n", "\u001b[0m\u001b[0;36m@@ -1,4 +1,4 @@\n", "\u001b[0m {\n", " \"path\": \"/hadoop/data09/dfs/datadir\", \n", "\u001b[0;31m- \"state\": \"absent\"\n", "\u001b[0m\u001b[0;32m+ \"state\": \"directory\"\n", "\u001b[0m }\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item=/hadoop/data10/dfs/datadir) => {\"changed\": true, \"item\": \"/hadoop/data10/dfs/datadir\"}\u001b[0m\n", "\u001b[0;31m--- before\n", "\u001b[0m\u001b[0;32m+++ after\n", "\u001b[0m\u001b[0;36m@@ -1,4 +1,4 @@\n", "\u001b[0m {\n", " \"path\": \"/hadoop/data10/dfs/datadir\", \n", "\u001b[0;31m- \"state\": \"absent\"\n", "\u001b[0m\u001b[0;32m+ \"state\": \"directory\"\n", "\u001b[0m }\n", "\n", "TASK [slavenode : fix_init_scripts] ********************************************\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'path': u'hadoop-hdfs', u'name': u'hadoop-hdfs-datanode'}) => {\"backup\": \"\", \"changed\": true, \"item\": {\"name\": \"hadoop-hdfs-datanode\", \"path\": \"hadoop-hdfs\"}, \"msg\": \"line added\"}\u001b[0m\n", "\u001b[0;31m--- before: /usr/hdp/XXX.XXX.XXX.0-3485/hadoop-hdfs/etc/rc.d/init.d/hadoop-hdfs-datanode (content)\n", "\u001b[0m\u001b[0;32m+++ after: /usr/hdp/XXX.XXX.XXX.0-3485/hadoop-hdfs/etc/rc.d/init.d/hadoop-hdfs-datanode (content)\n", "\u001b[0m\u001b[0;36m@@ -32,6 +32,7 @@\n", "\u001b[0m ### END INIT INFO\n", " \n", " . /lib/lsb/init-functions\n", "\u001b[0;32m+. /etc/default/hadoop-hdfs-datanode\n", "\u001b[0m \n", " BIGTOP_DEFAULTS_DIR=${BIGTOP_DEFAULTS_DIR-/etc/default}\n", " [ -n \"${BIGTOP_DEFAULTS_DIR}\" -a -r ${BIGTOP_DEFAULTS_DIR}/hadoop ] && . 
${BIGTOP_DEFAULTS_DIR}/hadoop\n", "\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'path': u'hadoop-yarn', u'name': u'hadoop-yarn-nodemanager'}) => {\"backup\": \"\", \"changed\": true, \"item\": {\"name\": \"hadoop-yarn-nodemanager\", \"path\": \"hadoop-yarn\"}, \"msg\": \"line added\"}\u001b[0m\n", "\u001b[0;31m--- before: /usr/hdp/XXX.XXX.XXX.0-3485/hadoop-yarn/etc/rc.d/init.d/hadoop-yarn-nodemanager (content)\n", "\u001b[0m\u001b[0;32m+++ after: /usr/hdp/XXX.XXX.XXX.0-3485/hadoop-yarn/etc/rc.d/init.d/hadoop-yarn-nodemanager (content)\n", "\u001b[0m\u001b[0;36m@@ -32,6 +32,7 @@\n", "\u001b[0m ### END INIT INFO\n", " \n", " . /lib/lsb/init-functions\n", "\u001b[0;32m+. /etc/default/hadoop-yarn-nodemanager\n", "\u001b[0m \n", " BIGTOP_DEFAULTS_DIR=${BIGTOP_DEFAULTS_DIR-/etc/default}\n", " [ -n \"${BIGTOP_DEFAULTS_DIR}\" -a -r ${BIGTOP_DEFAULTS_DIR}/hadoop ] && . ${BIGTOP_DEFAULTS_DIR}/hadoop\n", "\n", "\n", "TASK [slavenode : create_symbolic_link_to/etc/init.d] **************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item={u'path': u'hadoop-hdfs', u'name': u'hadoop-hdfs-datanode'}) => {\"changed\": false, \"dest\": \"/etc/init.d/hadoop-hdfs-datanode\", \"gid\": 0, \"group\": \"root\", \"item\": {\"name\": \"hadoop-hdfs-datanode\", \"path\": \"hadoop-hdfs\"}, \"mode\": \"0777\", \"owner\": \"root\", \"size\": 70, \"src\": \"/usr/hdp/XXX.XXX.XXX.0-3485/hadoop-hdfs/etc/rc.d/init.d/hadoop-hdfs-datanode\", \"state\": \"link\", \"uid\": 0}\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item={u'path': u'hadoop-yarn', u'name': u'hadoop-yarn-nodemanager'}) => {\"changed\": false, \"dest\": \"/etc/init.d/hadoop-yarn-nodemanager\", \"gid\": 0, \"group\": \"root\", \"item\": {\"name\": \"hadoop-yarn-nodemanager\", \"path\": \"hadoop-yarn\"}, \"mode\": \"0777\", \"owner\": \"root\", \"size\": 73, \"src\": \"/usr/hdp/XXX.XXX.XXX.0-3485/hadoop-yarn/etc/rc.d/init.d/hadoop-yarn-nodemanager\", \"state\": \"link\", \"uid\": 0}\u001b[0m\n", "\n", "TASK [slavenode : create_holder_directory_for_hadoop_tmp_dir] ******************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => {\"changed\": false, \"gid\": 493, \"group\": \"yarn\", \"mode\": \"0755\", \"owner\": \"yarn\", \"path\": \"/hadoop/tmp\", \"size\": 4096, \"state\": \"directory\", \"uid\": 494}\u001b[0m\n", "\n", "TASK [slavenode : create_yarn_pid_dir] *****************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => {\"changed\": false, \"gid\": 493, \"group\": \"yarn\", \"mode\": \"0755\", \"owner\": \"yarn\", \"path\": \"/var/run/hadoop-yarn\", \"size\": 4096, \"state\": \"directory\", \"uid\": 494}\u001b[0m\n", "\n", "TASK [slavenode : create_hdfs_log_dir] *****************************************\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => {\"changed\": true, \"gid\": 491, \"group\": \"hdfs\", \"mode\": \"0755\", \"owner\": \"hdfs\", \"path\": \"/var/log/hadoop-hdfs\", \"size\": 4096, \"state\": \"directory\", \"uid\": 492}\u001b[0m\n", "\u001b[0;31m--- before\n", "\u001b[0m\u001b[0;32m+++ after\n", "\u001b[0m\u001b[0;36m@@ -1,4 +1,4 @@\n", "\u001b[0m {\n", "\u001b[0;31m- \"group\": 491, \n", "\u001b[0m\u001b[0;32m+ \"group\": 494, \n", "\u001b[0m \"path\": \"/var/log/hadoop-hdfs\"\n", " }\n", "\n", "TASK [slavenode : create_yarn_log_dir] *****************************************\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => {\"changed\": true, \"gid\": 493, \"group\": \"yarn\", \"mode\": \"0755\", \"owner\": \"yarn\", \"path\": \"/var/log/hadoop-yarn\", \"size\": 4096, \"state\": \"directory\", \"uid\": 
494}\u001b[0m\n", "\u001b[0;31m--- before\n", "\u001b[0m\u001b[0;32m+++ after\n", "\u001b[0m\u001b[0;36m@@ -1,4 +1,4 @@\n", "\u001b[0m {\n", "\u001b[0;31m- \"group\": 493, \n", "\u001b[0m\u001b[0;32m+ \"group\": 494, \n", "\u001b[0m \"path\": \"/var/log/hadoop-yarn\"\n", " }\n", "\n", "TASK [slavenode : copy_defaults_file] ******************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item=hadoop-hdfs-datanode) => {\"changed\": false, \"gid\": 0, \"group\": \"root\", \"item\": \"hadoop-hdfs-datanode\", \"mode\": \"0755\", \"owner\": \"root\", \"path\": \"/etc/default/hadoop-hdfs-datanode\", \"size\": 1246, \"state\": \"file\", \"uid\": 0}\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item=hadoop-yarn-nodemanager) => {\"changed\": false, \"gid\": 0, \"group\": \"root\", \"item\": \"hadoop-yarn-nodemanager\", \"mode\": \"0755\", \"owner\": \"root\", \"path\": \"/etc/default/hadoop-yarn-nodemanager\", \"size\": 1000, \"state\": \"file\", \"uid\": 0}\u001b[0m\n", "\n", "TASK [slavenode : ensure_version_link] *****************************************\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => {\"changed\": true, \"dest\": \"/usr/hdp/current/hadoop-yarn\", \"src\": \"/usr/hdp/XXX.XXX.XXX.0-3485/hadoop-yarn\", \"state\": \"absent\"}\u001b[0m\n", "\u001b[0;31m--- before\n", "\u001b[0m\u001b[0;32m+++ after\n", "\u001b[0m\u001b[0;36m@@ -1,4 +1,4 @@\n", "\u001b[0m {\n", " \"path\": \"/usr/hdp/current/hadoop-yarn\", \n", "\u001b[0;31m- \"state\": \"absent\"\n", "\u001b[0m\u001b[0;32m+ \"state\": \"link\"\n", "\u001b[0m }\n", "\n", "PLAY RECAP *********************************************************************\n", "\u001b[0;33mXXX.XXX.XXX.233\u001b[0m : \u001b[0;32mok\u001b[0m\u001b[0;32m=\u001b[0m\u001b[0;32m42\u001b[0m \u001b[0;33mchanged\u001b[0m\u001b[0;33m=\u001b[0m\u001b[0;33m5\u001b[0m unreachable=0 failed=0 \n", "\n" ] } ], "source": [ "!ansible-playbook -CDv {playbook_dir}/install_slavenode.yml -l { host_new_machine }" ] }, { "cell_type": "code", "execution_count": 53, "metadata": { "ExecuteTime": { "end_time": "2016-05-17T07:06:55.102335", "start_time": "2016-05-17T07:06:13.837343" }, "lc_cell_meme": { "current": "a8eb18fc-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eb1a14-495a-11e8-ba2b-0242ac130002", "previous": "a8eb17e4-495a-11e8-ba2b-0242ac130002" }, "scrolled": true }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\n", "PLAY [hadoop_slavenode] ********************************************************\n", "\n", "TASK [setup] *******************************************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [base : include] **********************************************************\n", "\u001b[0;36mincluded: /tmp/tmpP0U4b9/hadoop/playbooks/roles/base/tasks/repo.yml for XXX.XXX.XXX.233\u001b[0m\n", "\n", "TASK [base : install_hdp_repo] *************************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [base : include] **********************************************************\n", "\u001b[0;36mincluded: /tmp/tmpP0U4b9/hadoop/playbooks/roles/base/tasks/conf.yml for XXX.XXX.XXX.233\u001b[0m\n", "\n", "TASK [base : create_hadoop_conf_dir] *******************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [base : copy_conf_files] **************************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item=core-site.xml)\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => 
(item=hdfs-site.xml)\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item=yarn-site.xml)\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item=mapred-site.xml)\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item=hadoop-env.sh)\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item=yarn-env.sh)\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item=mapred-env.sh)\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item=hadoop-metrics.properties)\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item=hadoop-metrics2.properties)\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item=log4j.properties)\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item=capacity-scheduler.xml)\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item=hosts.exclude)\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item=hosts.list)\u001b[0m\n", "\n", "TASK [base : copy_secure_conf_files] *******************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item=ssl-server.xml)\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item=ssl-client.xml)\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item=zk-acl.txt)\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item=container-executor.cfg)\u001b[0m\n", "\n", "TASK [base : include] **********************************************************\n", "\u001b[0;36mincluded: /tmp/tmpP0U4b9/hadoop/playbooks/roles/base/tasks/kerberos.yml for XXX.XXX.XXX.233\u001b[0m\n", "\n", "TASK [base : include] **********************************************************\n", "\u001b[0;36mincluded: /tmp/tmpP0U4b9/hadoop/playbooks/roles/base/tasks/principal.yml for XXX.XXX.XXX.233\u001b[0m\n", "\n", "TASK [base : Check principal] **************************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [base : Add principal] ****************************************************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [base : Check keytab] *****************************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [base : Prepare keytab] ***************************************************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [base : Modify permissions of keytab] *************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [base : include] **********************************************************\n", "\u001b[0;36mincluded: /tmp/tmpP0U4b9/hadoop/playbooks/roles/base/tasks/principal.yml for XXX.XXX.XXX.233\u001b[0m\n", "\n", "TASK [base : Check principal] **************************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [base : Add principal] ****************************************************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [base : Check keytab] *****************************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [base : Prepare keytab] ***************************************************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [base : Modify permissions of keytab] *************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [base : include] **********************************************************\n", "\u001b[0;36mincluded: /tmp/tmpP0U4b9/hadoop/playbooks/roles/base/tasks/principal.yml for XXX.XXX.XXX.233\u001b[0m\n", "\n", "TASK [base : Check principal] 
**************************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [base : Add principal] ****************************************************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [base : Check keytab] *****************************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [base : Prepare keytab] ***************************************************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [base : Modify permissions of keytab] *************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [base : include] **********************************************************\n", "\u001b[0;36mincluded: /tmp/tmpP0U4b9/hadoop/playbooks/roles/base/tasks/principal.yml for XXX.XXX.XXX.233\u001b[0m\n", "\n", "TASK [base : Check principal] **************************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [base : Add principal] ****************************************************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [base : Check keytab] *****************************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [base : Prepare keytab] ***************************************************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [base : Modify permissions of keytab] *************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [base : include] **********************************************************\n", "\u001b[0;36mincluded: /tmp/tmpP0U4b9/hadoop/playbooks/roles/base/tasks/keytab.yml for XXX.XXX.XXX.233\u001b[0m\n", "\n", "TASK [base : check keytab] *****************************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [base : prepare_script] ***************************************************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [base : run_script] *******************************************************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [base : Modify permissions of keytab] *************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [base : include] **********************************************************\n", "\u001b[0;36mincluded: /tmp/tmpP0U4b9/hadoop/playbooks/roles/base/tasks/keytab.yml for XXX.XXX.XXX.233\u001b[0m\n", "\n", "TASK [base : check keytab] *****************************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [base : prepare_script] ***************************************************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [base : run_script] *******************************************************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [base : Modify permissions of keytab] *************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [base : include] **********************************************************\n", "\u001b[0;36mincluded: /tmp/tmpP0U4b9/hadoop/playbooks/roles/base/tasks/keytab.yml for XXX.XXX.XXX.233\u001b[0m\n", "\n", "TASK [base : check keytab] *****************************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [base : prepare_script] 
***************************************************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [base : run_script] *******************************************************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [base : Modify permissions of keytab] *************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [java7 : include] *********************************************************\n", "\u001b[0;36mincluded: /tmp/tmpP0U4b9/hadoop/playbooks/roles/java7/tasks/install.yml for XXX.XXX.XXX.233\u001b[0m\n", "\n", "TASK [java7 : check_jdk7_installed] ********************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233]\u001b[0m\n", "\u001b[1;35m [WARNING]: Consider using yum, dnf or zypper module rather than running rpm\n", "\u001b[0m\n", "\n", "TASK [java7 : download_oraclejdk7_by_wget] *************************************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [java7 : md5sum_rpm] ******************************************************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [java7 : check_md5sum] ****************************************************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [java7 : install_oraclejdk] ***********************************************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [java7 : include] *********************************************************\n", "\u001b[0;36mincluded: /tmp/tmpP0U4b9/hadoop/playbooks/roles/java7/tasks/config.yml for XXX.XXX.XXX.233\u001b[0m\n", "\n", "TASK [java7 : copy_bash_profile] ***********************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [java7 : copy_sudoers_conf_of_JAVA_HOME] **********************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [slavenode : include] *****************************************************\n", "\u001b[0;36mincluded: /tmp/tmpP0U4b9/hadoop/playbooks/roles/slavenode/tasks/install.yml for XXX.XXX.XXX.233\u001b[0m\n", "\n", "TASK [slavenode : install_slavenode_packages] **********************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item=[u'hadoop-hdfs-datanode', u'hadoop-yarn-nodemanager', u'hadoop-mapreduce'])\u001b[0m\n", "\n", "TASK [slavenode : include] *****************************************************\n", "\u001b[0;36mincluded: /tmp/tmpP0U4b9/hadoop/playbooks/roles/slavenode/tasks/config.yml for XXX.XXX.XXX.233\u001b[0m\n", "\n", "TASK [slavenode : create_datanode_data_dir] ************************************\n", "\u001b[0;35m[DEPRECATION WARNING]: Using bare variables is deprecated. Update your playbooks\n", " so that the environment value uses the full variable syntax \n", "('{{dfs_datanode_data_dirs}}').\n", "This feature will be removed in a future \n", "release. 
Deprecation warnings can be disabled by setting \n", "deprecation_warnings=False in ansible.cfg.\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item=/hadoop/data01/dfs/datadir)\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item=/hadoop/data02/dfs/datadir)\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item=/hadoop/data03/dfs/datadir)\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item=/hadoop/data04/dfs/datadir)\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item=/hadoop/data05/dfs/datadir)\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item=/hadoop/data06/dfs/datadir)\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item=/hadoop/data07/dfs/datadir)\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item=/hadoop/data08/dfs/datadir)\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item=/hadoop/data09/dfs/datadir)\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item=/hadoop/data10/dfs/datadir)\u001b[0m\n", "\n", "TASK [slavenode : fix_init_scripts] ********************************************\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'path': u'hadoop-hdfs', u'name': u'hadoop-hdfs-datanode'})\u001b[0m\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233] => (item={u'path': u'hadoop-yarn', u'name': u'hadoop-yarn-nodemanager'})\u001b[0m\n", "\n", "TASK [slavenode : create_symbolic_link_to/etc/init.d] **************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item={u'path': u'hadoop-hdfs', u'name': u'hadoop-hdfs-datanode'})\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item={u'path': u'hadoop-yarn', u'name': u'hadoop-yarn-nodemanager'})\u001b[0m\n", "\n", "TASK [slavenode : create_holder_directory_for_hadoop_tmp_dir] ******************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [slavenode : create_yarn_pid_dir] *****************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [slavenode : create_hdfs_log_dir] *****************************************\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [slavenode : create_yarn_log_dir] *****************************************\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [slavenode : copy_defaults_file] ******************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item=hadoop-hdfs-datanode)\u001b[0m\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item=hadoop-yarn-nodemanager)\u001b[0m\n", "\n", "TASK [slavenode : ensure_version_link] *****************************************\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "PLAY RECAP *********************************************************************\n", "\u001b[0;33mXXX.XXX.XXX.233\u001b[0m : \u001b[0;32mok\u001b[0m\u001b[0;32m=\u001b[0m\u001b[0;32m50\u001b[0m \u001b[0;33mchanged\u001b[0m\u001b[0;33m=\u001b[0m\u001b[0;33m5\u001b[0m unreachable=0 failed=0 \n", "\n" ] } ], "source": [ "!ansible-playbook {playbook_dir}/install_slavenode.yml -l { host_new_machine }" ] }, { "cell_type": "markdown", "metadata": { "lc_cell_meme": { "current": "a8eb1a14-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eb1b2c-495a-11e8-ba2b-0242ac130002", "previous": "a8eb18fc-495a-11e8-ba2b-0242ac130002" } }, "source": [ "## Remove the node from the decommission list\n", "\n", "Remove the DataNode from decommission list..." 
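, "\n", "\n", "A quick way to confirm the edit took effect is to grep the group file. A minimal sketch (the variable `datanode_decommission_nodes` and the file path are the ones used in this notebook; the surrounding file layout is assumed):\n", "\n", "```sh\n", "# hypothetical check: the restored node should no longer be listed here\n", "grep -n -A 5 'datanode_decommission_nodes' group_vars/hadoop_all_cluster1\n", "```"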
] }, { "cell_type": "code", "execution_count": 54, "metadata": { "collapsed": true, "lc_cell_meme": { "current": "a8eb1b2c-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eb1c44-495a-11e8-ba2b-0242ac130002", "previous": "a8eb1a14-495a-11e8-ba2b-0242ac130002" } }, "outputs": [], "source": [ "!cp group_vars/hadoop_all_cluster1 {work_dir}/hadoop_all_cluster1_old" ] }, { "cell_type": "markdown", "metadata": { "lc_cell_meme": { "current": "a8eb1c44-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eb1d5c-495a-11e8-ba2b-0242ac130002", "previous": "a8eb1b2c-495a-11e8-ba2b-0242ac130002" } }, "source": [ "Edit [hadoop_all_cluster1](../edit/group_vars/hadoop_all_cluster1) and remove it from `datanode_decommission_nodes`\n", "\n", "It has not been decommissioned, skipped." ] }, { "cell_type": "code", "execution_count": 55, "metadata": { "lc_cell_meme": { "current": "a8eb1d5c-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eb1e74-495a-11e8-ba2b-0242ac130002", "previous": "a8eb1c44-495a-11e8-ba2b-0242ac130002" } }, "outputs": [], "source": [ "#!diff -ur {work_dir}/hadoop_all_cluster1_old group_vars/hadoop_all_cluster1" ] }, { "cell_type": "markdown", "metadata": { "lc_cell_meme": { "current": "a8eb1e74-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eb1f96-495a-11e8-ba2b-0242ac130002", "previous": "a8eb1d5c-495a-11e8-ba2b-0242ac130002" } }, "source": [ "Deliver latest configurations to all nodes..." ] }, { "cell_type": "code", "execution_count": 56, "metadata": { "lc_cell_meme": { "current": "a8eb1f96-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eb20a4-495a-11e8-ba2b-0242ac130002", "previous": "a8eb1e74-495a-11e8-ba2b-0242ac130002" } }, "outputs": [], "source": [ "#!ansible-playbook -CDv {playbook_dir}/conf_base.yml -l {target_group}" ] }, { "cell_type": "code", "execution_count": 57, "metadata": { "lc_cell_meme": { "current": "a8eb20a4-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eb21c6-495a-11e8-ba2b-0242ac130002", "previous": "a8eb1f96-495a-11e8-ba2b-0242ac130002" } }, "outputs": [], "source": [ "#!ansible-playbook {playbook_dir}/conf_base.yml -l {target_group}" ] }, { "cell_type": "markdown", "metadata": { "lc_cell_meme": { "current": "a8eb21c6-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eb22de-495a-11e8-ba2b-0242ac130002", "previous": "a8eb20a4-495a-11e8-ba2b-0242ac130002" } }, "source": [ "OK, I will ask NameNodes to reload node settings..." ] }, { "cell_type": "code", "execution_count": 58, "metadata": { "lc_cell_meme": { "current": "a8eb22de-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eb23f6-495a-11e8-ba2b-0242ac130002", "previous": "a8eb21c6-495a-11e8-ba2b-0242ac130002" } }, "outputs": [], "source": [ "#!ansible hadoop_namenode -m shell -a 'hdfs dfsadmin -fs hdfs://$(hostname):8020 -refreshNodes' --sudo --sudo-user hdfs -l {target_group}" ] }, { "cell_type": "markdown", "metadata": { "lc_cell_meme": { "current": "a8eb23f6-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eb2504-495a-11e8-ba2b-0242ac130002", "previous": "a8eb22de-495a-11e8-ba2b-0242ac130002" } }, "source": [ "## cgroups\n", "\n", "Start cgroups..." 
] }, { "cell_type": "code", "execution_count": 59, "metadata": { "lc_cell_meme": { "current": "a8eb2504-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eb261c-495a-11e8-ba2b-0242ac130002", "previous": "a8eb23f6-495a-11e8-ba2b-0242ac130002" }, "scrolled": true }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\n", "PLAY [hadoop_all] **************************************************************\n", "\n", "TASK [setup] *******************************************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [os : include] ************************************************************\n", "\u001b[0;36mincluded: /tmp/tmpP0U4b9/hadoop/playbooks/roles/os/tasks/limits.yml for XXX.XXX.XXX.233\u001b[0m\n", "\n", "TASK [os : set_nofile_soft_limit] **********************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [os : set_nofile_hard_limit] **********************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [os : set_core_soft_limit] ************************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [os : set_core_hard_limit] ************************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [os : include] ************************************************************\n", "\u001b[0;36mincluded: /tmp/tmpP0U4b9/hadoop/playbooks/roles/os/tasks/thp.yml for XXX.XXX.XXX.233\u001b[0m\n", "\n", "TASK [os : set_transparent_hugepage] *******************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [os : include] ************************************************************\n", "\u001b[0;36mincluded: /tmp/tmpP0U4b9/hadoop/playbooks/roles/os/tasks/kernel.yml for XXX.XXX.XXX.233\u001b[0m\n", "\n", "TASK [os : set_local_port_range] ***********************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [os : set_somaxconn] ******************************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [cgroups : include] *******************************************************\n", "\u001b[0;36mincluded: /tmp/tmpP0U4b9/hadoop/playbooks/roles/cgroups/tasks/install.yml for XXX.XXX.XXX.233\u001b[0m\n", "\n", "TASK [cgroups : install_cgroups] ***********************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item=[u'libcgroup', u'libcgroup-devel'])\u001b[0m\n", "\n", "TASK [cgroups : include] *******************************************************\n", "\u001b[0;36mincluded: /tmp/tmpP0U4b9/hadoop/playbooks/roles/cgroups/tasks/conf.yml for XXX.XXX.XXX.233\u001b[0m\n", "\n", "TASK [cgroups : create_direcoty_cgroups_scripts] *******************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [cgroups : copy_cgroups_scripts] ******************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233] => (item=cgroups.sh)\u001b[0m\n", "\n", "TASK [cgroups : copy_cgconfig.conf] ********************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [cgroups : check_chkconfig_cgconfig] **************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [cgroups : set_on_to_cgconfig_of_chkconfig] *******************************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [cgroups : started_cgconfig] 
**********************************************\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [cgroups : reboot] ********************************************************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [cgroups : wait for SSH port down] ****************************************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [cgroups : wait for SSH port up] ******************************************\n", "\u001b[0;36mskipping: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [cgroups : started_cgconfig] **********************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "PLAY RECAP *********************************************************************\n", "\u001b[0;33mXXX.XXX.XXX.233\u001b[0m : \u001b[0;32mok\u001b[0m\u001b[0;32m=\u001b[0m\u001b[0;32m20\u001b[0m \u001b[0;33mchanged\u001b[0m\u001b[0;33m=\u001b[0m\u001b[0;33m1\u001b[0m unreachable=0 failed=0 \n", "\n" ] } ], "source": [ "!ansible-playbook {playbook_dir}/install-base.yml -l { host_new_machine }" ] }, { "cell_type": "markdown", "metadata": { "lc_cell_meme": { "current": "a8eb261c-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eb2734-495a-11e8-ba2b-0242ac130002", "previous": "a8eb2504-495a-11e8-ba2b-0242ac130002" } }, "source": [ "## Start the DataNode\n", "\n", "Start the DataNode..." ] }, { "cell_type": "code", "execution_count": 60, "metadata": { "ExecuteTime": { "end_time": "2016-05-17T07:07:27.460655", "start_time": "2016-05-17T07:07:14.818874" }, "lc_cell_meme": { "current": "a8eb2734-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eb2856-495a-11e8-ba2b-0242ac130002", "previous": "a8eb261c-495a-11e8-ba2b-0242ac130002" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\n", "PLAY [hadoop_slavenode] ********************************************************\n", "\n", "TASK [setup] *******************************************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [start_hadoop-hdfs-datanode] **********************************************\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "PLAY RECAP *********************************************************************\n", "\u001b[0;33mXXX.XXX.XXX.233\u001b[0m : \u001b[0;32mok\u001b[0m\u001b[0;32m=\u001b[0m\u001b[0;32m2\u001b[0m \u001b[0;33mchanged\u001b[0m\u001b[0;33m=\u001b[0m\u001b[0;33m1\u001b[0m unreachable=0 failed=0 \n", "\n" ] } ], "source": [ "!ansible-playbook {playbook_dir}/start_datanode.yml -l { host_new_machine }" ] }, { "cell_type": "markdown", "metadata": { "lc_cell_meme": { "current": "a8eb2856-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eb2964-495a-11e8-ba2b-0242ac130002", "previous": "a8eb2734-495a-11e8-ba2b-0242ac130002" } }, "source": [ "OK. Checking the health..." 
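, "\n", "\n", "The full `hdfs dfsadmin -report` below lists every DataNode in the cluster. To focus on just the restored node, a hedged variant (filtering on the hostname used throughout this notebook) would be:\n", "\n", "```sh\n", "# hypothetical: show only the restored DataNode's section of the report\n", "hdfs dfsadmin -report | grep -A 8 'Hostname: sn02031601'\n", "```"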
] }, { "cell_type": "code", "execution_count": 61, "metadata": { "ExecuteTime": { "end_time": "2016-05-17T07:07:36.581623", "start_time": "2016-05-17T07:07:32.537164" }, "lc_cell_meme": { "current": "a8eb2964-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eb2a7c-495a-11e8-ba2b-0242ac130002", "previous": "a8eb2856-495a-11e8-ba2b-0242ac130002" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\u001b[0;32mXXX.XXX.XXX.200 | SUCCESS | rc=0 >>\r\n", "Configured Capacity: 268707633709056 (244.39 TB)\r\n", "Present Capacity: 255047025981280 (231.96 TB)\r\n", "DFS Remaining: 251740821216440 (228.96 TB)\r\n", "DFS Used: 3306204764840 (3.01 TB)\r\n", "DFS Used%: 1.30%\r\n", "Under replicated blocks: 0\r\n", "Blocks with corrupt replicas: 0\r\n", "Missing blocks: 0\r\n", "Missing blocks (with replication factor 1): 746\r\n", "\r\n", "-------------------------------------------------\r\n", "Live datanodes (9):\r\n", "\r\n", "Name: XXX.XXX.XXX.226:1004 (sn02022001)\r\n", "Hostname: sn02022001\r\n", "Decommission Status : Normal\r\n", "Configured Capacity: 29528238325760 (26.86 TB)\r\n", "DFS Used: 516681058899 (481.20 GB)\r\n", "Non DFS Used: 1501183657196 (1.37 TB)\r\n", "DFS Remaining: 27510373609665 (25.02 TB)\r\n", "DFS Used%: 1.75%\r\n", "DFS Remaining%: 93.17%\r\n", "Configured Cache Capacity: 0 (0 B)\r\n", "Cache Used: 0 (0 B)\r\n", "Cache Remaining: 0 (0 B)\r\n", "Cache Used%: 100.00%\r\n", "Cache Remaining%: 0.00%\r\n", "Xceivers: 8\r\n", "Last contact: Fri Sep 02 19:24:04 JST 2016\r\n", "\r\n", "\r\n", "Name: XXX.XXX.XXX.232:1004 (sn02032001)\r\n", "Hostname: sn02032001\r\n", "Decommission Status : Normal\r\n", "Configured Capacity: 29528238325760 (26.86 TB)\r\n", "DFS Used: 342266323665 (318.76 GB)\r\n", "Non DFS Used: 1501317738175 (1.37 TB)\r\n", "DFS Remaining: 27684654263920 (25.18 TB)\r\n", "DFS Used%: 1.16%\r\n", "DFS Remaining%: 93.76%\r\n", "Configured Cache Capacity: 0 (0 B)\r\n", "Cache Used: 0 (0 B)\r\n", "Cache Remaining: 0 (0 B)\r\n", "Cache Used%: 100.00%\r\n", "Cache Remaining%: 0.00%\r\n", "Xceivers: 13\r\n", "Last contact: Fri Sep 02 19:24:04 JST 2016\r\n", "\r\n", "\r\n", "Name: XXX.XXX.XXX.233:1004 (sn02031601)\r\n", "Hostname: sn02031601\r\n", "Decommission Status : Normal\r\n", "Configured Capacity: 30512734584832 (27.75 TB)\r\n", "DFS Used: 21571022848 (20.09 GB)\r\n", "Non DFS Used: 1550786138112 (1.41 TB)\r\n", "DFS Remaining: 28940377423872 (26.32 TB)\r\n", "DFS Used%: 0.07%\r\n", "DFS Remaining%: 94.85%\r\n", "Configured Cache Capacity: 0 (0 B)\r\n", "Cache Used: 0 (0 B)\r\n", "Cache Remaining: 0 (0 B)\r\n", "Cache Used%: 100.00%\r\n", "Cache Remaining%: 0.00%\r\n", "Xceivers: 1\r\n", "Last contact: Fri Sep 02 19:24:03 JST 2016\r\n", "\r\n", "\r\n", "Name: XXX.XXX.XXX.234:1004 (sn02031201)\r\n", "Hostname: sn02031201\r\n", "Decommission Status : Normal\r\n", "Configured Capacity: 29528238325760 (26.86 TB)\r\n", "DFS Used: 540451153248 (503.33 GB)\r\n", "Non DFS Used: 1501181450926 (1.37 TB)\r\n", "DFS Remaining: 27486605721586 (25.00 TB)\r\n", "DFS Used%: 1.83%\r\n", "DFS Remaining%: 93.09%\r\n", "Configured Cache Capacity: 0 (0 B)\r\n", "Cache Used: 0 (0 B)\r\n", "Cache Remaining: 0 (0 B)\r\n", "Cache Used%: 100.00%\r\n", "Cache Remaining%: 0.00%\r\n", "Xceivers: 8\r\n", "Last contact: Fri Sep 02 19:24:03 JST 2016\r\n", "\r\n", "\r\n", "Name: XXX.XXX.XXX.228:1004 (sn02021201)\r\n", "Hostname: sn02021201\r\n", "Decommission Status : Normal\r\n", "Configured Capacity: 29528238325760 (26.86 TB)\r\n", "DFS Used: 510509822423 (475.45 
GB)\r\n", "Non DFS Used: 1501046187970 (1.37 TB)\r\n", "DFS Remaining: 27516682315367 (25.03 TB)\r\n", "DFS Used%: 1.73%\r\n", "DFS Remaining%: 93.19%\r\n", "Configured Cache Capacity: 0 (0 B)\r\n", "Cache Used: 0 (0 B)\r\n", "Cache Remaining: 0 (0 B)\r\n", "Cache Used%: 100.00%\r\n", "Cache Remaining%: 0.00%\r\n", "Xceivers: 6\r\n", "Last contact: Fri Sep 02 19:24:03 JST 2016\r\n", "\r\n", "\r\n", "Name: XXX.XXX.XXX.231:1004 (sn02032401)\r\n", "Hostname: sn02032401\r\n", "Decommission Status : Normal\r\n", "Configured Capacity: 31497230843904 (28.65 TB)\r\n", "DFS Used: 157319332033 (146.52 GB)\r\n", "Non DFS Used: 1601186356193 (1.46 TB)\r\n", "DFS Remaining: 29738725155678 (27.05 TB)\r\n", "DFS Used%: 0.50%\r\n", "DFS Remaining%: 94.42%\r\n", "Configured Cache Capacity: 0 (0 B)\r\n", "Cache Used: 0 (0 B)\r\n", "Cache Remaining: 0 (0 B)\r\n", "Cache Used%: 100.00%\r\n", "Cache Remaining%: 0.00%\r\n", "Xceivers: 8\r\n", "Last contact: Fri Sep 02 19:24:05 JST 2016\r\n", "\r\n", "\r\n", "Name: XXX.XXX.XXX.236:1004 (sn02030401)\r\n", "Hostname: sn02030401\r\n", "Decommission Status : Normal\r\n", "Configured Capacity: 29528238325760 (26.86 TB)\r\n", "DFS Used: 146715107328 (136.64 GB)\r\n", "Non DFS Used: 1501427626954 (1.37 TB)\r\n", "DFS Remaining: 27880095591478 (25.36 TB)\r\n", "DFS Used%: 0.50%\r\n", "DFS Remaining%: 94.42%\r\n", "Configured Cache Capacity: 0 (0 B)\r\n", "Cache Used: 0 (0 B)\r\n", "Cache Remaining: 0 (0 B)\r\n", "Cache Used%: 100.00%\r\n", "Cache Remaining%: 0.00%\r\n", "Xceivers: 12\r\n", "Last contact: Fri Sep 02 19:24:03 JST 2016\r\n", "\r\n", "\r\n", "Name: XXX.XXX.XXX.225:1004 (sn02022401)\r\n", "Hostname: sn02022401\r\n", "Decommission Status : Normal\r\n", "Configured Capacity: 29528238325760 (26.86 TB)\r\n", "DFS Used: 530144455857 (493.74 GB)\r\n", "Non DFS Used: 1501045214254 (1.37 TB)\r\n", "DFS Remaining: 27497048655649 (25.01 TB)\r\n", "DFS Used%: 1.80%\r\n", "DFS Remaining%: 93.12%\r\n", "Configured Cache Capacity: 0 (0 B)\r\n", "Cache Used: 0 (0 B)\r\n", "Cache Remaining: 0 (0 B)\r\n", "Cache Used%: 100.00%\r\n", "Cache Remaining%: 0.00%\r\n", "Xceivers: 6\r\n", "Last contact: Fri Sep 02 19:24:03 JST 2016\r\n", "\r\n", "\r\n", "Name: XXX.XXX.XXX.230:1004 (sn02020401)\r\n", "Hostname: sn02020401\r\n", "Decommission Status : Normal\r\n", "Configured Capacity: 29528238325760 (26.86 TB)\r\n", "DFS Used: 540546488539 (503.42 GB)\r\n", "Non DFS Used: 1501433357996 (1.37 TB)\r\n", "DFS Remaining: 27486258479225 (25.00 TB)\r\n", "DFS Used%: 1.83%\r\n", "DFS Remaining%: 93.08%\r\n", "Configured Cache Capacity: 0 (0 B)\r\n", "Cache Used: 0 (0 B)\r\n", "Cache Remaining: 0 (0 B)\r\n", "Cache Used%: 100.00%\r\n", "Cache Remaining%: 0.00%\r\n", "Xceivers: 14\r\n", "Last contact: Fri Sep 02 19:24:04 JST 2016\r\n", "\u001b[0m\r\n" ] } ], "source": [ "!ansible hadoop_client -s -U hdfs -a 'hdfs dfsadmin -report' -l {target_group}" ] }, { "cell_type": "markdown", "metadata": { "lc_cell_meme": { "current": "a8eb2a7c-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eb2b94-495a-11e8-ba2b-0242ac130002", "previous": "a8eb2964-495a-11e8-ba2b-0242ac130002" } }, "source": [ "OK, It seems that the cluster is HELATHY.\n", "\n", "> Though `Missing blocks (with replication factor 1)` of the sample output shows `746`, it is caused by [known issue HDFS-8806](https://issues.apache.org/jira/browse/HDFS-8806).\n", "> We can ignore the counter.\n", "\n", "Additionaly, I would like to check the result of fsck..." 
] }, { "cell_type": "code", "execution_count": 62, "metadata": { "ExecuteTime": { "end_time": "2016-05-17T07:08:15.562675", "start_time": "2016-05-17T07:08:11.426205" }, "lc_cell_meme": { "current": "a8eb2b94-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eb2cac-495a-11e8-ba2b-0242ac130002", "previous": "a8eb2a7c-495a-11e8-ba2b-0242ac130002" }, "scrolled": true }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\u001b[0;32mXXX.XXX.XXX.200 | SUCCESS | rc=0 >>\r\n", "FSCK started by hdfs (auth:KERBEROS_SSL) from /XXX.XXX.XXX.200 for path / at Fri Sep 02 19:24:30 JST 2016\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", 
"....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", 
"....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", 
"....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", 
"....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", 
"....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "....................................................................................................\r\n", "...................................................................Status: HEALTHY\r\n", " Total size:\t1091981477852 B (Total open files size: 830 B)\r\n", " Total dirs:\t1414\r\n", " Total files:\t16167\r\n", " Total symlinks:\t\t0 (Files currently being written: 11)\r\n", " Total blocks (validated):\t23619 (avg. block size 46233179 B) (Total open file blocks (not validated): 10)\r\n", " Minimally replicated blocks:\t23619 (100.0 %)\r\n", " Over-replicated blocks:\t0 (0.0 %)\r\n", " Under-replicated blocks:\t0 (0.0 %)\r\n", " Mis-replicated blocks:\t\t0 (0.0 %)\r\n", " Default replication factor:\t3\r\n", " Average block replication:\t3.0\r\n", " Corrupt blocks:\t\t0\r\n", " Missing replicas:\t\t0 (0.0 %)\r\n", " Number of data-nodes:\t\t9\r\n", " Number of racks:\t\t1\r\n", "FSCK ended at Fri Sep 02 19:24:30 JST 2016 in 303 milliseconds\r\n", "\r\n", "\r\n", "The filesystem under path '/' is HEALTHYConnecting to namenode via https://cn01070401:50470/fsck?ugi=hdfs&path=%2F\r\n", "\u001b[0m\r\n" ] } ], "source": [ "!ansible hadoop_client -s -U hdfs -a 'hdfs fsck /' -l {target_group}" ] }, { "cell_type": "markdown", "metadata": { "lc_cell_meme": { "current": "a8eb2cac-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eb36fc-495a-11e8-ba2b-0242ac130002", "previous": "a8eb2b94-495a-11e8-ba2b-0242ac130002" } }, "source": [ "## Start the NodeManager" ] }, { "cell_type": "code", "execution_count": 63, "metadata": { "lc_cell_meme": { "current": "a8eb36fc-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eb385a-495a-11e8-ba2b-0242ac130002", "previous": "a8eb2cac-495a-11e8-ba2b-0242ac130002" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\n", "PLAY [hadoop_slavenode] ********************************************************\n", "\n", "TASK [setup] *******************************************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [start_hadoop-yarn-nodemanager] *******************************************\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "PLAY RECAP *********************************************************************\n", "\u001b[0;33mXXX.XXX.XXX.233\u001b[0m : \u001b[0;32mok\u001b[0m\u001b[0;32m=\u001b[0m\u001b[0;32m2\u001b[0m \u001b[0;33mchanged\u001b[0m\u001b[0;33m=\u001b[0m\u001b[0;33m1\u001b[0m unreachable=0 failed=0 \n", "\n" ] } ], "source": [ "!ansible-playbook {playbook_dir}/start_nodemanager.yml -l { host_new_machine }" ] }, { "cell_type": "markdown", "metadata": { "lc_cell_meme": { "current": "a8eb385a-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eb397c-495a-11e8-ba2b-0242ac130002", "previous": "a8eb36fc-495a-11e8-ba2b-0242ac130002" } }, "source": [ "Confirm the YARN nodes..." 
] }, { "cell_type": "code", "execution_count": 64, "metadata": { "lc_cell_meme": { "current": "a8eb397c-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eb3a94-495a-11e8-ba2b-0242ac130002", "previous": "a8eb385a-495a-11e8-ba2b-0242ac130002" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\u001b[0;32mXXX.XXX.XXX.200 | SUCCESS | rc=0 >>\r\n", "Total Nodes:9\r\n", " Node-Id\t Node-State\tNode-Http-Address\tNumber-of-Running-Containers\r\n", "sn02030401:45454\t RUNNING\t sn02030401:8044\t 0\r\n", "sn02032001:45454\t RUNNING\t sn02032001:8044\t 0\r\n", "sn02021201:45454\t RUNNING\t sn02021201:8044\t 0\r\n", "sn02022001:45454\t RUNNING\t sn02022001:8044\t 0\r\n", "sn02022401:45454\t RUNNING\t sn02022401:8044\t 0\r\n", "sn02031601:45454\t RUNNING\t sn02031601:8044\t 0\r\n", "sn02020401:45454\t RUNNING\t sn02020401:8044\t 0\r\n", "sn02031201:45454\t RUNNING\t sn02031201:8044\t 0\r\n", "sn02032401:45454\t RUNNING\t sn02032401:8044\t 016/09/02 19:24:52 INFO impl.TimelineClientImpl: Timeline service address: https://cn01070403:8190/ws/v1/timeline/\r\n", "\u001b[0m\r\n" ] } ], "source": [ "!ansible hadoop_client -s -U yarn -a 'yarn node -list' -l {target_group}" ] }, { "cell_type": "markdown", "metadata": { "lc_cell_meme": { "current": "a8eb3a94-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eb3bb6-495a-11e8-ba2b-0242ac130002", "previous": "a8eb397c-495a-11e8-ba2b-0242ac130002" } }, "source": [ "**sn02031601** is running, OK." ] }, { "cell_type": "markdown", "metadata": { "lc_cell_meme": { "current": "a8eb3bb6-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eb3cce-495a-11e8-ba2b-0242ac130002", "previous": "a8eb3a94-495a-11e8-ba2b-0242ac130002" } }, "source": [ "## Start the RegionServer" ] }, { "cell_type": "code", "execution_count": 65, "metadata": { "lc_cell_meme": { "current": "a8eb3cce-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eb3df0-495a-11e8-ba2b-0242ac130002", "previous": "a8eb3bb6-495a-11e8-ba2b-0242ac130002" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\n", "PLAY [hadoop_hbase_regionserver] ***********************************************\n", "\n", "TASK [setup] *******************************************************************\n", "\u001b[0;32mok: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "TASK [start_hbase-regionserver] ************************************************\n", "\u001b[0;33mchanged: [XXX.XXX.XXX.233]\u001b[0m\n", "\n", "PLAY RECAP *********************************************************************\n", "\u001b[0;33mXXX.XXX.XXX.233\u001b[0m : \u001b[0;32mok\u001b[0m\u001b[0;32m=\u001b[0m\u001b[0;32m2\u001b[0m \u001b[0;33mchanged\u001b[0m\u001b[0;33m=\u001b[0m\u001b[0;33m1\u001b[0m unreachable=0 failed=0 \n", "\n" ] } ], "source": [ "!ansible-playbook {playbook_dir}/start_hbase_regionserver.yml -l { host_new_machine }" ] }, { "cell_type": "markdown", "metadata": { "lc_cell_meme": { "current": "a8eb3df0-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eb3efe-495a-11e8-ba2b-0242ac130002", "previous": "a8eb3cce-495a-11e8-ba2b-0242ac130002" } }, "source": [ "Confirm the Region Servers..." 
] }, { "cell_type": "code", "execution_count": 66, "metadata": { "lc_cell_meme": { "current": "a8eb3efe-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eb4016-495a-11e8-ba2b-0242ac130002", "previous": "a8eb3df0-495a-11e8-ba2b-0242ac130002" } }, "outputs": [ { "data": { "text/plain": [ "'XXX.XXX.XXX.197'" ] }, "execution_count": 66, "metadata": {}, "output_type": "execute_result" } ], "source": [ "from kazoo.client import KazooClient\n", "zk_stdout = !ansible -l {target_group} -m ping hadoop_zookeeperserver\n", "zk_hosts = [line.split()[0] for line in zk_stdout if \"SUCCESS\" in line]\n", "zk = KazooClient(hosts='%s:2181' % zk_hosts[0], read_only=True)\n", "zk.start()\n", "(master_result,v) = zk.get(\"/hbase/master\")\n", "zk.stop()\n", "active_master = None\n", "for m in filter(lambda m: m['HBase Master'], machines):\n", " if m['Name'] in master_result:\n", " active_master = m['Service IP']\n", "active_master" ] }, { "cell_type": "code", "execution_count": 67, "metadata": { "lc_cell_meme": { "current": "a8eb4016-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eb412e-495a-11e8-ba2b-0242ac130002", "previous": "a8eb3efe-495a-11e8-ba2b-0242ac130002" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "sn02020401,16020,1468567893041\r\n", "sn02021201,16020,1468567892900\r\n", "sn02022001,16020,1468567893194\r\n", "sn02022401,16020,1468567894744\r\n", "sn02030401,16020,1470634628613\r\n", "sn02031201,16020,1458102803012\r\n", "sn02031601,16020,1472811915179\r\n", "sn02032001,16020,1469169835467\r\n", "sn02032401,16020,1470720586071\r\n" ] } ], "source": [ "!zk-shell { zk_hosts[0] } --run-once \"ls /hbase/rs\"" ] }, { "cell_type": "markdown", "metadata": { "lc_cell_meme": { "current": "a8eb412e-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eb4246-495a-11e8-ba2b-0242ac130002", "previous": "a8eb4016-495a-11e8-ba2b-0242ac130002" } }, "source": [ "**sn02031601** is included in the list of Region Servers, OK.\n", "\n", "It's done." ] }, { "cell_type": "markdown", "metadata": { "collapsed": true, "lc_cell_meme": { "current": "a8eb4246-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eb4354-495a-11e8-ba2b-0242ac130002", "previous": "a8eb412e-495a-11e8-ba2b-0242ac130002" } }, "source": [ "# Cleanup\n", "\n", "Remove temporary files..." 
] }, { "cell_type": "code", "execution_count": 68, "metadata": { "lc_cell_meme": { "current": "a8eb4354-495a-11e8-ba2b-0242ac130002", "history": [], "next": "a8eb446c-495a-11e8-ba2b-0242ac130002", "previous": "a8eb4246-495a-11e8-ba2b-0242ac130002" } }, "outputs": [], "source": [ "!rm -fr {work_dir}" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true, "lc_cell_meme": { "current": "a8eb446c-495a-11e8-ba2b-0242ac130002", "history": [], "next": null, "previous": "a8eb4354-495a-11e8-ba2b-0242ac130002" } }, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 2", "language": "python", "name": "python2" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 2 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython2", "version": "2.7.9" }, "lc_notebook_meme": { "current": "a8eace06-495a-11e8-ba2b-0242ac130002", "history": [], "root_cells": [ "a8ead072-495a-11e8-ba2b-0242ac130002", "a8ead1da-495a-11e8-ba2b-0242ac130002", "a8ead310-495a-11e8-ba2b-0242ac130002", "a8ead446-495a-11e8-ba2b-0242ac130002", "a8ead568-495a-11e8-ba2b-0242ac130002", "a8ead68a-495a-11e8-ba2b-0242ac130002", "a8ead7ac-495a-11e8-ba2b-0242ac130002", "a8ead8ce-495a-11e8-ba2b-0242ac130002", "a8ead9f0-495a-11e8-ba2b-0242ac130002", "a8eadb08-495a-11e8-ba2b-0242ac130002", "a8eadc20-495a-11e8-ba2b-0242ac130002", "a8eadd42-495a-11e8-ba2b-0242ac130002", "a8eade64-495a-11e8-ba2b-0242ac130002", "a8eadf86-495a-11e8-ba2b-0242ac130002", "a8eae09e-495a-11e8-ba2b-0242ac130002", "a8eae1b6-495a-11e8-ba2b-0242ac130002", "a8eae2ce-495a-11e8-ba2b-0242ac130002", "a8eae3e6-495a-11e8-ba2b-0242ac130002", "a8eae4fe-495a-11e8-ba2b-0242ac130002", "a8eae620-495a-11e8-ba2b-0242ac130002", "a8eae738-495a-11e8-ba2b-0242ac130002", "a8eae850-495a-11e8-ba2b-0242ac130002", "a8eae97c-495a-11e8-ba2b-0242ac130002", "a8eaea94-495a-11e8-ba2b-0242ac130002", "a8eaebb6-495a-11e8-ba2b-0242ac130002", "a8eaecce-495a-11e8-ba2b-0242ac130002", "a8eaede6-495a-11e8-ba2b-0242ac130002", "a8eaeef4-495a-11e8-ba2b-0242ac130002", "a8eaf00c-495a-11e8-ba2b-0242ac130002", "a8eaf124-495a-11e8-ba2b-0242ac130002", "a8eaf23c-495a-11e8-ba2b-0242ac130002", "a8eaf354-495a-11e8-ba2b-0242ac130002", "a8eaf46c-495a-11e8-ba2b-0242ac130002", "a8eaf8cc-495a-11e8-ba2b-0242ac130002", "a8eafa0c-495a-11e8-ba2b-0242ac130002", "a8eafb2e-495a-11e8-ba2b-0242ac130002", "a8eafc46-495a-11e8-ba2b-0242ac130002", "a8eafd68-495a-11e8-ba2b-0242ac130002", "a8eafe80-495a-11e8-ba2b-0242ac130002", "a8eaff98-495a-11e8-ba2b-0242ac130002", "a8eb00b0-495a-11e8-ba2b-0242ac130002", "a8eb01c8-495a-11e8-ba2b-0242ac130002", "a8eb02e0-495a-11e8-ba2b-0242ac130002", "a8eb03f8-495a-11e8-ba2b-0242ac130002", "a8eb0506-495a-11e8-ba2b-0242ac130002", "a8eb0628-495a-11e8-ba2b-0242ac130002", "a8eb0740-495a-11e8-ba2b-0242ac130002", "a8eb0858-495a-11e8-ba2b-0242ac130002", "a8eb0984-495a-11e8-ba2b-0242ac130002", "a8eb0a9c-495a-11e8-ba2b-0242ac130002", "a8eb0bb4-495a-11e8-ba2b-0242ac130002", "a8eb0cd6-495a-11e8-ba2b-0242ac130002", "a8eb0df8-495a-11e8-ba2b-0242ac130002", "a8eb0f10-495a-11e8-ba2b-0242ac130002", "a8eb1028-495a-11e8-ba2b-0242ac130002", "a8eb1140-495a-11e8-ba2b-0242ac130002", "a8eb1258-495a-11e8-ba2b-0242ac130002", "a8eb1370-495a-11e8-ba2b-0242ac130002", "a8eb1492-495a-11e8-ba2b-0242ac130002", "a8eb15b4-495a-11e8-ba2b-0242ac130002", "a8eb16cc-495a-11e8-ba2b-0242ac130002", "a8eb17e4-495a-11e8-ba2b-0242ac130002", "a8eb18fc-495a-11e8-ba2b-0242ac130002", 
"a8eb1a14-495a-11e8-ba2b-0242ac130002", "a8eb1b2c-495a-11e8-ba2b-0242ac130002", "a8eb1c44-495a-11e8-ba2b-0242ac130002", "a8eb1d5c-495a-11e8-ba2b-0242ac130002", "a8eb1e74-495a-11e8-ba2b-0242ac130002", "a8eb1f96-495a-11e8-ba2b-0242ac130002", "a8eb20a4-495a-11e8-ba2b-0242ac130002", "a8eb21c6-495a-11e8-ba2b-0242ac130002", "a8eb22de-495a-11e8-ba2b-0242ac130002", "a8eb23f6-495a-11e8-ba2b-0242ac130002", "a8eb2504-495a-11e8-ba2b-0242ac130002", "a8eb261c-495a-11e8-ba2b-0242ac130002", "a8eb2734-495a-11e8-ba2b-0242ac130002", "a8eb2856-495a-11e8-ba2b-0242ac130002", "a8eb2964-495a-11e8-ba2b-0242ac130002", "a8eb2a7c-495a-11e8-ba2b-0242ac130002", "a8eb2b94-495a-11e8-ba2b-0242ac130002", "a8eb2cac-495a-11e8-ba2b-0242ac130002", "a8eb36fc-495a-11e8-ba2b-0242ac130002", "a8eb385a-495a-11e8-ba2b-0242ac130002", "a8eb397c-495a-11e8-ba2b-0242ac130002", "a8eb3a94-495a-11e8-ba2b-0242ac130002", "a8eb3bb6-495a-11e8-ba2b-0242ac130002", "a8eb3cce-495a-11e8-ba2b-0242ac130002", "a8eb3df0-495a-11e8-ba2b-0242ac130002", "a8eb3efe-495a-11e8-ba2b-0242ac130002", "a8eb4016-495a-11e8-ba2b-0242ac130002", "a8eb412e-495a-11e8-ba2b-0242ac130002", "a8eb4246-495a-11e8-ba2b-0242ac130002", "a8eb4354-495a-11e8-ba2b-0242ac130002", "a8eb446c-495a-11e8-ba2b-0242ac130002" ] }, "toc": { "base_numbering": 1, "nav_menu": {}, "number_sections": true, "sideBar": true, "skip_h1_title": false, "title_cell": "Table of Contents", "title_sidebar": "Contents", "toc_cell": false, "toc_position": {}, "toc_section_display": true, "toc_window_display": true } }, "nbformat": 4, "nbformat_minor": 1 }