{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"series = 'PP4/2'"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
""
],
"text/vnd.plotly.v1+html": [
""
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/html": [
""
],
"text/vnd.plotly.v1+html": [
""
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"import os\n",
"import pandas as pd\n",
"import series_details\n",
"import plotly.offline as py\n",
"py.init_notebook_mode()"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"df = pd.read_csv(os.path.join('data', '{}.csv'.format(series.replace('/', '-'))), parse_dates=['start_date', 'end_date'])"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {
"scrolled": false
},
"outputs": [
{
"data": {
"text/html": [
"National Archives of Australia: Series PP4/2"
],
"text/plain": [
""
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/html": [
"Applications for certificates of exemption from dictation test with supporting documents, annual single number series"
],
"text/plain": [
""
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/html": [
"Total items: 613. Access status: Open 610 (99.51%), Not yet examined 3 (0.49%). Number of items digitised: 28 (4.57%). Number of pages digitised: 1,512. Date of earliest content: 1903. Date of latest content: 1947. Download the complete CSV file"
],
"text/plain": [
""
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"series_details.display_summary(series, df)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Content preview"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {
"scrolled": false
},
"outputs": [
{
"data": {
"text/html": [
" \n",
" \n",
" \n",
" | \n",
" identifier | \n",
" series | \n",
" control_symbol | \n",
" title | \n",
" contents_dates | \n",
" start_date | \n",
" end_date | \n",
" access_status | \n",
" location | \n",
" digitised_status | \n",
" digitised_pages | \n",
"
\n",
" \n",
" 0 | \n",
" 4317014 | \n",
" PP4/2 | \n",
" 1915/1801 | \n",
" Ah FAN [Chinese] [Application for certificate of exemption from dictation test] | \n",
" 1915 - 1915 | \n",
" 1915-01-01 00:00:00 | \n",
" 1915-01-01 00:00:00 | \n",
" Open | \n",
" Perth | \n",
" True | \n",
" 22 | \n",
"
\n",
" 1 | \n",
" 4317015 | \n",
" PP4/2 | \n",
" 1926/12 | \n",
" Chung ON [Chinese] [Application for certificate of exemption from dictation test] | \n",
" 1913 - 1926 | \n",
" 1913-01-01 00:00:00 | \n",
" 1926-01-01 00:00:00 | \n",
" Open | \n",
" Perth | \n",
" False | \n",
" 0 | \n",
"
\n",
" 2 | \n",
" 4317016 | \n",
" PP4/2 | \n",
" 1926/26 | \n",
" Chong Ah SIE [KEE] [Chinese] [Application for certificate of exemption from dictation test] | \n",
" 1918 - 1926 | \n",
" 1918-01-01 00:00:00 | \n",
" 1926-01-01 00:00:00 | \n",
" Open | \n",
" Perth | \n",
" True | \n",
" 32 | \n",
"
\n",
" 3 | \n",
" 4317017 | \n",
" PP4/2 | \n",
" 1926/53 | \n",
" Joan Hoon KWONG [Chinese] [Application for certificate of exemption from dictation test] | \n",
" 1926 - 1928 | \n",
" 1926-01-01 00:00:00 | \n",
" 1928-01-01 00:00:00 | \n",
" Open | \n",
" Perth | \n",
" False | \n",
" 0 | \n",
"
\n",
" 4 | \n",
" 4317018 | \n",
" PP4/2 | \n",
" 1926/57 | \n",
" To King GUE [Chinese] [Application for certificate of exemption from dictation test] | \n",
" 1926 - 1928 | \n",
" 1926-01-01 00:00:00 | \n",
" 1928-01-01 00:00:00 | \n",
" Open | \n",
" Perth | \n",
" False | \n",
" 0 | \n",
"
\n",
""
],
"text/plain": [
""
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Change the number_of_rows value to see more\n",
"number_of_rows = 5\n",
"\n",
"# Display dataframe \n",
"df[:number_of_rows].style.set_properties(['title'], **{'text-align': 'left'}).set_table_styles([dict(selector=\"th\", props=[(\"text-align\", \"center\")]),\n",
" dict(selector='.row_heading, .blank', props=[('display', 'none')])])"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Plot content dates"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"data": {
"application/vnd.plotly.v1+json": {
"data": [
{
"name": "Digitised",
"type": "bar",
"x": [
1905,
1906,
1907,
1908,
1909,
1910,
1911,
1912,
1913,
1914,
1915,
1916,
1917,
1918,
1919,
1920,
1921,
1922,
1923,
1924,
1925,
1926,
1927,
1928,
1929,
1930,
1931,
1932,
1933,
1934,
1935,
1936,
1937,
1938,
1939,
1940,
1941,
1942,
1943,
1944,
1945,
1946,
1947
],
"y": [
1,
1,
2,
2,
2,
4,
4,
6,
7,
9,
11,
10,
12,
14,
15,
15,
16,
17,
18,
18,
19,
20,
19,
20,
20,
21,
18,
15,
13,
9,
8,
7,
7,
7,
6,
3,
1,
1,
1,
1,
1,
1,
1
]
},
{
"name": "Not digitised",
"type": "bar",
"x": [
1903,
1904,
1905,
1906,
1907,
1908,
1909,
1910,
1911,
1912,
1913,
1914,
1915,
1916,
1917,
1918,
1919,
1920,
1921,
1922,
1923,
1924,
1925,
1926,
1927,
1928,
1929,
1930,
1931,
1932,
1933,
1934,
1935,
1936,
1937,
1938,
1939,
1940,
1941,
1942,
1943,
1944,
1945,
1946,
1947
],
"y": [
2,
3,
6,
9,
14,
18,
26,
34,
46,
68,
92,
109,
146,
177,
199,
231,
274,
300,
343,
359,
375,
384,
399,
423,
427,
426,
416,
371,
321,
275,
248,
214,
194,
165,
141,
105,
83,
34,
21,
5,
5,
5,
5,
5,
4
]
}
],
"layout": {
"barmode": "stack",
"title": "Content dates",
"xaxis": {
"title": "Year"
},
"yaxis": {
"title": "Number of items"
}
}
},
"text/html": [
""
],
"text/vnd.plotly.v1+html": [
""
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"fig = series_details.plot_dates(df)\n",
"py.iplot(fig, filename='series-dates-bar')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## View word frequencies"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"# Combine all of the file titles into a single string\n",
"title_text = df['title'].str.lower().str.cat(sep=' ')"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
" \n",
" \n",
" \n",
" | \n",
" word | \n",
" count | \n",
"
\n",
" \n",
" 3 | \n",
" application | \n",
" 602 | \n",
"
\n",
" 4 | \n",
" certificate | \n",
" 602 | \n",
"
\n",
" 5 | \n",
" exemption | \n",
" 602 | \n",
"
\n",
" 6 | \n",
" dictation | \n",
" 602 | \n",
"
\n",
" 7 | \n",
" test | \n",
" 602 | \n",
"
\n",
" 2 | \n",
" chinese | \n",
" 418 | \n",
"
\n",
" 0 | \n",
" ah | \n",
" 129 | \n",
"
\n",
" 33 | \n",
" japanese | \n",
" 109 | \n",
"
\n",
" 28 | \n",
" indian | \n",
" 56 | \n",
"
\n",
" 75 | \n",
" lee | \n",
" 45 | \n",
"
\n",
" 66 | \n",
" wong | \n",
" 31 | \n",
"
\n",
" 34 | \n",
" fong | \n",
" 28 | \n",
"
\n",
" 76 | \n",
" sing | \n",
" 20 | \n",
"
\n",
" 130 | \n",
" mrs | \n",
" 17 | \n",
"
\n",
" 185 | \n",
" singh | \n",
" 16 | \n",
"
\n",
" 285 | \n",
" yee | \n",
" 15 | \n",
"
\n",
" 19 | \n",
" chen | \n",
" 14 | \n",
"
\n",
" 9 | \n",
" chong | \n",
" 13 | \n",
"
\n",
" 20 | \n",
" wing | \n",
" 12 | \n",
"
\n",
" 60 | \n",
" wah | \n",
" 11 | \n",
"
\n",
" 38 | \n",
" arrived | \n",
" 10 | \n",
"
\n",
" 8 | \n",
" chung | \n",
" 10 | \n",
"
\n",
" 157 | \n",
" quan | \n",
" 10 | \n",
"
\n",
" 159 | \n",
" chin | \n",
" 9 | \n",
"
\n",
" 1 | \n",
" fan | \n",
" 9 | \n",
"
\n",
""
],
"text/plain": [
""
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"series_details.display_word_counts(title_text)"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
" \n",
" \n",
" \n",
" | \n",
" ngram | \n",
" count | \n",
"
\n",
" \n",
" 0 | \n",
" dictation test | \n",
" 602 | \n",
"
\n",
" 1 | \n",
" certificate of | \n",
" 602 | \n",
"
\n",
" 2 | \n",
" from dictation | \n",
" 602 | \n",
"
\n",
" 3 | \n",
" of exemption | \n",
" 602 | \n",
"
\n",
" 4 | \n",
" exemption from | \n",
" 602 | \n",
"
\n",
" 5 | \n",
" for certificate | \n",
" 601 | \n",
"
\n",
" 6 | \n",
" application for | \n",
" 601 | \n",
"
\n",
" 7 | \n",
" chinese application | \n",
" 416 | \n",
"
\n",
" 8 | \n",
" test ah | \n",
" 117 | \n",
"
\n",
" 9 | \n",
" japanese application | \n",
" 109 | \n",
"
\n",
" 10 | \n",
" indian application | \n",
" 56 | \n",
"
\n",
" 11 | \n",
" test lee | \n",
" 36 | \n",
"
\n",
" 12 | \n",
" test wong | \n",
" 26 | \n",
"
\n",
" 13 | \n",
" test mrs | \n",
" 16 | \n",
"
\n",
" 14 | \n",
" singh indian | \n",
" 16 | \n",
"
\n",
" 15 | \n",
" sing chinese | \n",
" 15 | \n",
"
\n",
" 16 | \n",
" test chen | \n",
" 13 | \n",
"
\n",
" 17 | \n",
" fong chinese | \n",
" 12 | \n",
"
\n",
" 18 | \n",
" test fong | \n",
" 11 | \n",
"
\n",
" 19 | \n",
" test yee | \n",
" 10 | \n",
"
\n",
" 20 | \n",
" you chinese | \n",
" 10 | \n",
"
\n",
" 21 | \n",
" chong chinese | \n",
" 10 | \n",
"
\n",
" 22 | \n",
" shing chinese | \n",
" 8 | \n",
"
\n",
" 23 | \n",
" test chin | \n",
" 7 | \n",
"
\n",
" 24 | \n",
" wing chinese | \n",
" 7 | \n",
"
\n",
""
],
"text/plain": [
""
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"# Change ngram_count for larger ngrams (trigrams etc)\n",
"ngram_count = 2\n",
"series_details.display_top_ngrams(title_text, ngram_count)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.5"
}
},
"nbformat": 4,
"nbformat_minor": 2
}