{"nbformat":4,"nbformat_minor":0,"metadata":{"colab":{"name":"2022-01-14-sess-retailrocket.ipynb","provenance":[{"file_id":"https://github.com/recohut/nbs/blob/main/raw/P846279%20%7C%20Predictability%20limits%20in%20session-based%20next%20item%20recommendation%20on%20Retailrocket%20data.ipynb","timestamp":1644614678385},{"file_id":"1U21hXOB6beyej0h0mhY2RKl19Cd_PJOl","timestamp":1639127788117},{"file_id":"https://github.com/recohut/recsys/blob/modules/modules/M859611_Preprocessing_Music_Session_Dataset.ipynb","timestamp":1638637273300}],"collapsed_sections":[],"toc_visible":true,"mount_file_id":"1N0xQicjQHtOFA3WEb4H3YBsPYBh8HSKb","authorship_tag":"ABX9TyN5qU9PfItIUU35g4A4q/h4"},"kernelspec":{"name":"python3","display_name":"Python 3"},"language_info":{"name":"python"}},"cells":[{"cell_type":"markdown","source":["# Predictability limits in session-based next item recommendation on Retailrocket data"],"metadata":{"id":"0-C5ulLAspbz"}},{"cell_type":"markdown","source":["Estimate the predictability limits due to randomness and due to algorithm design in some methods of session-based recommendation."],"metadata":{"id":"IYkBeq6Rr6jJ"}},{"cell_type":"markdown","metadata":{"id":"KR2HTM55eBXn"},"source":["## Preprocessing RetailRocket Session Dataset"]},{"cell_type":"code","metadata":{"id":"-HJaBLCJPGow"},"source":["import numpy as np\n","import pandas as pd\n","from datetime import timezone, datetime, timedelta\n","import time"],"execution_count":null,"outputs":[]},{"cell_type":"code","metadata":{"id":"DQdMXuTlEERB","colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"status":"ok","timestamp":1639127826324,"user_tz":-330,"elapsed":5817,"user":{"displayName":"Sparsh Agarwal","photoUrl":"https://lh3.googleusercontent.com/a/default-user=s64","userId":"13037694610922482904"}},"outputId":"69200791-4f08-407d-9edc-7110fae79257"},"source":["'''\n","preprocessing method [\"info\",\"org\",\"org_min_date\",\"days_test\",\"slice\",\"buys\"]\n"," info: just load and show info\n"," org: from gru4rec (last day => test set)\n"," org_min_date: from gru4rec (last day => test set) but from a minimal date onwards\n"," days_test: adapted from gru4rec (last N days => test set)\n"," slice: new (create multiple train-test-combinations with a sliding window approach \n"," buys: load buys and safe file to prepared\n","'''\n","# METHOD = \"slice\"\n","METHOD = input('Preprocessing method (info/org/org_min_date/days_test/slice/buys):') or 'slice'\n","assert(METHOD in 'info/org/org_min_date/days_test/slice/buys'.split('/')), 'Invalid Preprocessing method.'\n","\n","'''\n","data config (all methods)\n","'''\n","PATH = './retailrocket/'\n","PATH_PROCESSED = './retailrocket/slices/'\n","FILE = 'events'\n","\n","'''\n","org_min_date config\n","'''\n","MIN_DATE = '2015-09-02'\n","\n","'''\n","filtering config (all methods)\n","'''\n","SESSION_LENGTH = 30 * 60 #30 minutes\n","MIN_SESSION_LENGTH = 2\n","MIN_ITEM_SUPPORT = 5\n","MIN_DATE = '2014-04-01'\n","\n","'''\n","days test default config\n","'''\n","DAYS_TEST = 2\n","\n","'''\n","slicing default config\n","'''\n","NUM_SLICES = 5 #offset in days from the first date in the data set\n","DAYS_OFFSET = 0 #number of days the training start date is shifted after creating one slice\n","DAYS_SHIFT = 27\n","#each slice consists of...\n","DAYS_TRAIN = 25\n","DAYS_TEST = 2"],"execution_count":null,"outputs":[{"name":"stdout","output_type":"stream","text":["Preprocessing method 
(info/org/org_min_date/days_test/slice/buys):slice\n"]}]},{"cell_type":"code","metadata":{"id":"nlavuPjUD_Zm","colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"status":"ok","timestamp":1639127836976,"user_tz":-330,"elapsed":2695,"user":{"displayName":"Sparsh Agarwal","photoUrl":"https://lh3.googleusercontent.com/a/default-user=s64","userId":"13037694610922482904"}},"outputId":"8dff44d5-6049-47da-e3b3-bad76059eecf"},"source":["!wget -q --show-progress https://github.com/RecoHut-Datasets/retail_rocket/raw/v2/retailrocket.zip\n","!unzip retailrocket.zip\n","!mkdir retailrocket/slices"],"execution_count":null,"outputs":[{"output_type":"stream","name":"stdout","text":["retailrocket.zip 100%[===================>] 32.00M 147MB/s in 0.2s \n","Archive: retailrocket.zip\n"," creating: retailrocket/\n"," inflating: retailrocket/events.csv \n"," creating: retailrocket/prepared_window/\n"," inflating: retailrocket/prepared_window/events.0.hdf \n"," inflating: retailrocket/prepared_window/events.1.hdf \n"," inflating: retailrocket/prepared_window/events.2.hdf \n"," inflating: retailrocket/prepared_window/events.3.hdf \n"," inflating: retailrocket/prepared_window/events.4.hdf \n"]}]},{"cell_type":"code","metadata":{"id":"psEwAk-wbgBn"},"source":["#preprocessing from original gru4rec\n","def preprocess_org( path=PATH, file=FILE, path_proc=PATH_PROCESSED, min_item_support=MIN_ITEM_SUPPORT, min_session_length=MIN_SESSION_LENGTH ):\n"," \n"," data, buys = load_data( path+file )\n"," data = filter_data( data, min_item_support, min_session_length )\n"," split_data_org( data, path_proc+file )\n","\n","#preprocessing from original gru4rec but from a certain point in time\n","def preprocess_org_min_date( path=PATH, file=FILE, path_proc=PATH_PROCESSED, min_item_support=MIN_ITEM_SUPPORT, min_session_length=MIN_SESSION_LENGTH, min_date=MIN_DATE ):\n"," \n"," data, buys = load_data( path+file )\n"," data = filter_data( data, min_item_support, min_session_length )\n"," data = filter_min_date( data, min_date )\n"," split_data_org( data, path_proc+file )\n","\n","#preprocessing adapted from original gru4rec\n","def preprocess_days_test( path=PATH, file=FILE, path_proc=PATH_PROCESSED, min_item_support=MIN_ITEM_SUPPORT, min_session_length=MIN_SESSION_LENGTH, days_test=DAYS_TEST ):\n"," \n"," data, buys = load_data( path+file )\n"," data = filter_data( data, min_item_support, min_session_length )\n"," split_data( data, path_proc+file, days_test )\n","\n","#preprocessing from original gru4rec but from a certain point in time\n","def preprocess_days_test_min_date( path=PATH, file=FILE, path_proc=PATH_PROCESSED, min_item_support=MIN_ITEM_SUPPORT, min_session_length=MIN_SESSION_LENGTH, days_test=DAYS_TEST, min_date=MIN_DATE ):\n"," \n"," data, buys = load_data( path+file )\n"," data = filter_data( data, min_item_support, min_session_length )\n"," data = filter_min_date( data, min_date )\n"," split_data( data, path_proc+file, days_test )\n","\n","#preprocessing to create data slices with a sliding window\n","def preprocess_slices( path=PATH, file=FILE, path_proc=PATH_PROCESSED, min_item_support=MIN_ITEM_SUPPORT, min_session_length=MIN_SESSION_LENGTH,\n"," num_slices = NUM_SLICES, days_offset = DAYS_OFFSET, days_shift = DAYS_SHIFT, days_train = DAYS_TRAIN, days_test=DAYS_TEST ):\n"," \n"," data, buys = load_data( path+file )\n"," data = filter_data( data, min_item_support, min_session_length )\n"," slice_data( data, path_proc+file, num_slices, days_offset, days_shift, days_train, days_test )\n"," \n","#just load 
and show info\n","def preprocess_info( path=PATH, file=FILE, path_proc=PATH_PROCESSED, min_item_support=MIN_ITEM_SUPPORT, min_session_length=MIN_SESSION_LENGTH ):\n"," \n"," data, buys = load_data( path+file )\n"," data = filter_data( data, min_item_support, min_session_length )\n"," \n","def preprocess_save( path=PATH, file=FILE, path_proc=PATH_PROCESSED, min_item_support=MIN_ITEM_SUPPORT, min_session_length=MIN_SESSION_LENGTH ):\n"," \n"," data, buys = load_data( path+file )\n"," data = filter_data( data, min_item_support, min_session_length )\n"," data.to_csv(path_proc + file + '_preprocessed.txt', sep='\\t', index=False)\n"," \n","#preprocessing to create a file with buy actions\n","def preprocess_buys( path=PATH, file=FILE, path_proc=PATH_PROCESSED ): \n"," data, buys = load_data( path+file )\n"," store_buys(buys, path_proc+file)\n"," \n","def load_data( file ) : \n"," \n"," #load csv\n"," data = pd.read_csv( file+'.csv', sep=',', header=0, usecols=[0,1,2,3], dtype={0:np.int64, 1:np.int32, 2:str, 3:np.int32})\n"," #specify header names\n"," data.columns = ['Time','UserId','Type','ItemId']\n"," data['Time'] = (data.Time / 1000).astype( int )\n"," \n"," data.sort_values( ['UserId','Time'], ascending=True, inplace=True )\n"," \n"," #sessionize \n"," data['TimeTmp'] = pd.to_datetime(data.Time, unit='s')\n"," \n"," data.sort_values( ['UserId','TimeTmp'], ascending=True, inplace=True )\n","# users = data.groupby('UserId')\n"," \n"," data['TimeShift'] = data['TimeTmp'].shift(1)\n"," data['TimeDiff'] = (data['TimeTmp'] - data['TimeShift']).dt.total_seconds().abs()\n"," data['SessionIdTmp'] = (data['TimeDiff'] > SESSION_LENGTH).astype( int )\n"," data['SessionId'] = data['SessionIdTmp'].cumsum( skipna=False )\n"," del data['SessionIdTmp'], data['TimeShift'], data['TimeDiff']\n"," \n"," \n"," data.sort_values( ['SessionId','Time'], ascending=True, inplace=True )\n"," \n"," cart = data[data.Type == 'addtocart']\n"," data = data[data.Type == 'view']\n"," del data['Type']\n"," \n"," print(data)\n"," \n"," #output\n"," \n"," print( data.Time.min() )\n"," print( data.Time.max() )\n"," data_start = datetime.fromtimestamp( data.Time.min(), timezone.utc )\n"," data_end = datetime.fromtimestamp( data.Time.max(), timezone.utc )\n"," \n"," del data['TimeTmp']\n"," \n"," print('Loaded data set\\n\\tEvents: {}\\n\\tSessions: {}\\n\\tItems: {}\\n\\tSpan: {} / {}\\n\\n'.\n"," format( len(data), data.SessionId.nunique(), data.ItemId.nunique(), data_start.date().isoformat(), data_end.date().isoformat() ) )\n"," \n"," return data, cart;\n","\n","\n","def filter_data( data, min_item_support, min_session_length ) : \n"," \n"," #y?\n"," session_lengths = data.groupby('SessionId').size()\n"," data = data[np.in1d(data.SessionId, session_lengths[ session_lengths>1 ].index)]\n"," \n"," #filter item support\n"," item_supports = data.groupby('ItemId').size()\n"," data = data[np.in1d(data.ItemId, item_supports[ item_supports>= min_item_support ].index)]\n"," \n"," #filter session length\n"," session_lengths = data.groupby('SessionId').size()\n"," data = data[np.in1d(data.SessionId, session_lengths[ session_lengths>= min_session_length ].index)]\n"," \n"," #output\n"," data_start = datetime.fromtimestamp( data.Time.min(), timezone.utc )\n"," data_end = datetime.fromtimestamp( data.Time.max(), timezone.utc )\n"," \n"," print('Filtered data set\\n\\tEvents: {}\\n\\tSessions: {}\\n\\tItems: {}\\n\\tSpan: {} / {}\\n\\n'.\n"," format( len(data), data.SessionId.nunique(), data.ItemId.nunique(), data_start.date().isoformat(), 
data_end.date().isoformat() ) )\n"," \n"," return data;\n","\n","def filter_min_date( data, min_date='2014-04-01' ) :\n"," \n"," min_datetime = datetime.strptime(min_date + ' 00:00:00', '%Y-%m-%d %H:%M:%S')\n"," \n"," #filter\n"," session_max_times = data.groupby('SessionId').Time.max()\n"," session_keep = session_max_times[ session_max_times > min_datetime.timestamp() ].index\n"," \n"," data = data[ np.in1d(data.SessionId, session_keep) ]\n"," \n"," #output\n"," data_start = datetime.fromtimestamp( data.Time.min(), timezone.utc )\n"," data_end = datetime.fromtimestamp( data.Time.max(), timezone.utc )\n"," \n"," print('Filtered data set\\n\\tEvents: {}\\n\\tSessions: {}\\n\\tItems: {}\\n\\tSpan: {} / {}\\n\\n'.\n"," format( len(data), data.SessionId.nunique(), data.ItemId.nunique(), data_start.date().isoformat(), data_end.date().isoformat() ) )\n"," \n"," return data;\n","\n","\n","\n","def split_data_org( data, output_file ) :\n"," \n"," tmax = data.Time.max()\n"," session_max_times = data.groupby('SessionId').Time.max()\n"," session_train = session_max_times[session_max_times < tmax-86400].index\n"," session_test = session_max_times[session_max_times >= tmax-86400].index\n"," train = data[np.in1d(data.SessionId, session_train)]\n"," test = data[np.in1d(data.SessionId, session_test)]\n"," test = test[np.in1d(test.ItemId, train.ItemId)]\n"," tslength = test.groupby('SessionId').size()\n"," test = test[np.in1d(test.SessionId, tslength[tslength>=2].index)]\n"," print('Full train set\\n\\tEvents: {}\\n\\tSessions: {}\\n\\tItems: {}'.format(len(train), train.SessionId.nunique(), train.ItemId.nunique()))\n"," train.to_csv(output_file + '_train_full.txt', sep='\\t', index=False)\n"," print('Test set\\n\\tEvents: {}\\n\\tSessions: {}\\n\\tItems: {}'.format(len(test), test.SessionId.nunique(), test.ItemId.nunique()))\n"," test.to_csv(output_file + '_test.txt', sep='\\t', index=False)\n"," \n"," tmax = train.Time.max()\n"," session_max_times = train.groupby('SessionId').Time.max()\n"," session_train = session_max_times[session_max_times < tmax-86400].index\n"," session_valid = session_max_times[session_max_times >= tmax-86400].index\n"," train_tr = train[np.in1d(train.SessionId, session_train)]\n"," valid = train[np.in1d(train.SessionId, session_valid)]\n"," valid = valid[np.in1d(valid.ItemId, train_tr.ItemId)]\n"," tslength = valid.groupby('SessionId').size()\n"," valid = valid[np.in1d(valid.SessionId, tslength[tslength>=2].index)]\n"," print('Train set\\n\\tEvents: {}\\n\\tSessions: {}\\n\\tItems: {}'.format(len(train_tr), train_tr.SessionId.nunique(), train_tr.ItemId.nunique()))\n"," train_tr.to_csv( output_file + '_train_tr.txt', sep='\\t', index=False)\n"," print('Validation set\\n\\tEvents: {}\\n\\tSessions: {}\\n\\tItems: {}'.format(len(valid), valid.SessionId.nunique(), valid.ItemId.nunique()))\n"," valid.to_csv( output_file + '_train_valid.txt', sep='\\t', index=False)\n"," \n"," \n"," \n","def split_data( data, output_file, days_test ) :\n"," \n"," data_end = datetime.fromtimestamp( data.Time.max(), timezone.utc )\n"," test_from = data_end - timedelta( days_test )\n"," \n"," session_max_times = data.groupby('SessionId').Time.max()\n"," session_train = session_max_times[ session_max_times < test_from.timestamp() ].index\n"," session_test = session_max_times[ session_max_times >= test_from.timestamp() ].index\n"," train = data[np.in1d(data.SessionId, session_train)]\n"," test = data[np.in1d(data.SessionId, session_test)]\n"," test = test[np.in1d(test.ItemId, train.ItemId)]\n"," tslength = 
test.groupby('SessionId').size()\n"," test = test[np.in1d(test.SessionId, tslength[tslength>=2].index)]\n"," print('Full train set\\n\\tEvents: {}\\n\\tSessions: {}\\n\\tItems: {}'.format(len(train), train.SessionId.nunique(), train.ItemId.nunique()))\n"," train.to_csv(output_file + '_train_full.txt', sep='\\t', index=False)\n"," print('Test set\\n\\tEvents: {}\\n\\tSessions: {}\\n\\tItems: {}'.format(len(test), test.SessionId.nunique(), test.ItemId.nunique()))\n"," test.to_csv(output_file + '_test.txt', sep='\\t', index=False)\n"," \n"," \n"," \n","def slice_data( data, output_file, num_slices, days_offset, days_shift, days_train, days_test ): \n"," \n"," for slice_id in range( 0, num_slices ) :\n"," split_data_slice( data, output_file, slice_id, days_offset+(slice_id*days_shift), days_train, days_test )\n","\n","def split_data_slice( data, output_file, slice_id, days_offset, days_train, days_test ) :\n"," \n"," data_start = datetime.fromtimestamp( data.Time.min(), timezone.utc )\n"," data_end = datetime.fromtimestamp( data.Time.max(), timezone.utc )\n"," \n"," print('Full data set {}\\n\\tEvents: {}\\n\\tSessions: {}\\n\\tItems: {}\\n\\tSpan: {} / {}'.\n"," format( slice_id, len(data), data.SessionId.nunique(), data.ItemId.nunique(), data_start.isoformat(), data_end.isoformat() ) )\n"," \n"," \n"," start = datetime.fromtimestamp( data.Time.min(), timezone.utc ) + timedelta( days_offset ) \n"," middle = start + timedelta( days_train )\n"," end = middle + timedelta( days_test )\n"," \n"," #prefilter the timespan\n"," session_max_times = data.groupby('SessionId').Time.max()\n"," greater_start = session_max_times[session_max_times >= start.timestamp()].index\n"," lower_end = session_max_times[session_max_times <= end.timestamp()].index\n"," data_filtered = data[np.in1d(data.SessionId, greater_start.intersection( lower_end ))]\n"," \n"," print('Slice data set {}\\n\\tEvents: {}\\n\\tSessions: {}\\n\\tItems: {}\\n\\tSpan: {} / {} / {}'.\n"," format( slice_id, len(data_filtered), data_filtered.SessionId.nunique(), data_filtered.ItemId.nunique(), start.date().isoformat(), middle.date().isoformat(), end.date().isoformat() ) )\n"," \n"," #split to train and test\n"," session_max_times = data_filtered.groupby('SessionId').Time.max()\n"," sessions_train = session_max_times[session_max_times < middle.timestamp()].index\n"," sessions_test = session_max_times[session_max_times >= middle.timestamp()].index\n"," \n"," train = data[np.in1d(data.SessionId, sessions_train)]\n"," \n"," print('Train set {}\\n\\tEvents: {}\\n\\tSessions: {}\\n\\tItems: {}\\n\\tSpan: {} / {}'.\n"," format( slice_id, len(train), train.SessionId.nunique(), train.ItemId.nunique(), start.date().isoformat(), middle.date().isoformat() ) )\n"," \n"," train.to_csv(output_file + '_train_full.'+str(slice_id)+'.txt', sep='\\t', index=False)\n"," \n"," test = data[np.in1d(data.SessionId, sessions_test)]\n"," test = test[np.in1d(test.ItemId, train.ItemId)]\n"," \n"," tslength = test.groupby('SessionId').size()\n"," test = test[np.in1d(test.SessionId, tslength[tslength>=2].index)]\n"," \n"," print('Test set {}\\n\\tEvents: {}\\n\\tSessions: {}\\n\\tItems: {}\\n\\tSpan: {} / {} \\n\\n'.\n"," format( slice_id, len(test), test.SessionId.nunique(), test.ItemId.nunique(), middle.date().isoformat(), end.date().isoformat() ) )\n"," \n"," test.to_csv(output_file + '_test.'+str(slice_id)+'.txt', sep='\\t', index=False)\n","\n","\n","def store_buys( buys, target ):\n"," buys.to_csv( target + '_buys.txt', sep='\\t', index=False 
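)"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{},"source":["Before running the configured preprocessing, the cell below is a minimal illustrative sketch (hypothetical toy data, not part of the original pipeline) of how `load_data` above turns an inactivity gap larger than `SESSION_LENGTH` into a session boundary via a cumulative sum."]},{"cell_type":"code","metadata":{},"source":["import pandas as pd\n","\n","# hypothetical toy events for one user; the last view comes after a 2-hour pause\n","toy = pd.DataFrame({\n","    'UserId': [7, 7, 7],\n","    'Time': [1442004589, 1442004759, 1442004759 + 2 * 3600],\n","})\n","toy['TimeTmp'] = pd.to_datetime(toy.Time, unit='s')\n","toy = toy.sort_values(['UserId', 'TimeTmp'])\n","\n","# same rule as load_data: a gap of more than SESSION_LENGTH seconds (config cell above) starts a new session\n","gap = toy['TimeTmp'].diff().dt.total_seconds().abs()\n","toy['SessionId'] = (gap > SESSION_LENGTH).astype(int).cumsum()\n","print(toy)"],"execution_count":null,"outputs":[]},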
)"],"execution_count":null,"outputs":[]},{"cell_type":"code","metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"WJQVmNwlBNy9","executionInfo":{"status":"ok","timestamp":1639127858592,"user_tz":-330,"elapsed":17550,"user":{"displayName":"Sparsh Agarwal","photoUrl":"https://lh3.googleusercontent.com/a/default-user=s64","userId":"13037694610922482904"}},"outputId":"84800b16-e522-45a2-919a-b41db43971cc"},"source":["if __name__ == '__main__':\n"," '''\n"," Run the preprocessing configured above.\n"," '''\n"," \n"," print( \"START preprocessing \", METHOD )\n"," sc, st = time.time(), time.time()\n"," \n"," if METHOD == \"info\":\n"," preprocess_info( PATH, FILE, MIN_ITEM_SUPPORT, MIN_SESSION_LENGTH )\n"," \n"," elif METHOD == \"org\":\n"," preprocess_org( PATH, FILE, PATH_PROCESSED, MIN_ITEM_SUPPORT, MIN_SESSION_LENGTH )\n"," \n"," elif METHOD == \"org_min_date\":\n"," preprocess_org_min_date( PATH, FILE, PATH_PROCESSED, MIN_ITEM_SUPPORT, MIN_SESSION_LENGTH, MIN_DATE )\n"," \n"," elif METHOD == \"day_test\":\n"," preprocess_days_test( PATH, FILE, PATH_PROCESSED, MIN_ITEM_SUPPORT, MIN_SESSION_LENGTH, DAYS_TEST )\n"," \n"," elif METHOD == \"day_test_min_date\":\n"," preprocess_days_test_min_date( PATH, FILE, PATH_PROCESSED, MIN_ITEM_SUPPORT, MIN_SESSION_LENGTH, DAYS_TEST, MIN_DATE )\n"," \n"," elif METHOD == \"slice\":\n"," preprocess_slices( PATH, FILE, PATH_PROCESSED, MIN_ITEM_SUPPORT, MIN_SESSION_LENGTH, NUM_SLICES, DAYS_OFFSET, DAYS_SHIFT, DAYS_TRAIN, DAYS_TEST )\n"," \n"," elif METHOD == \"buys\":\n"," preprocess_buys( PATH, FILE, PATH_PROCESSED )\n"," \n"," elif METHOD == \"save\":\n"," preprocess_save( PATH, FILE, PATH_PROCESSED, MIN_ITEM_SUPPORT, MIN_SESSION_LENGTH )\n"," \n"," else: \n"," print( \"Invalid method \", METHOD )\n"," \n"," print( \"END preproccessing \", (time.time() - sc), \"c \", (time.time() - st), \"s\" )"],"execution_count":null,"outputs":[{"output_type":"stream","name":"stdout","text":["START preprocessing slice\n"," Time UserId ItemId TimeTmp SessionId\n","1361687 1442004589 0 285930 2015-09-11 20:49:49 0\n","1367212 1442004759 0 357564 2015-09-11 20:52:39 0\n","1367342 1442004917 0 67045 2015-09-11 20:55:17 0\n","830385 1439487966 1 72028 2015-08-13 17:46:06 1\n","742616 1438969904 2 325215 2015-08-07 17:51:44 2\n","... ... ... ... ... 
...\n","206556 1433972768 1407575 121220 2015-06-10 21:46:08 1761093\n","47311 1433343689 1407576 356208 2015-06-03 15:01:29 1761094\n","1762583 1431899284 1407577 427784 2015-05-17 21:48:04 1761095\n","1744277 1431825683 1407578 188736 2015-05-17 01:21:23 1761096\n","482559 1435184526 1407579 2521 2015-06-24 22:22:06 1761097\n","\n","[2664312 rows x 5 columns]\n","1430622011\n","1442545187\n","Loaded data set\n","\tEvents: 2664312\n","\tSessions: 1755206\n","\tItems: 234838\n","\tSpan: 2015-05-03 / 2015-09-18\n","\n","\n","Filtered data set\n","\tEvents: 1085763\n","\tSessions: 306919\n","\tItems: 49070\n","\tSpan: 2015-05-03 / 2015-09-18\n","\n","\n","Full data set 0\n","\tEvents: 1085763\n","\tSessions: 306919\n","\tItems: 49070\n","\tSpan: 2015-05-03T03:00:33+00:00 / 2015-09-18T02:58:58+00:00\n","Slice data set 0\n","\tEvents: 230003\n","\tSessions: 63746\n","\tItems: 32977\n","\tSpan: 2015-05-03 / 2015-05-28 / 2015-05-30\n","Train set 0\n","\tEvents: 213660\n","\tSessions: 59110\n","\tItems: 32052\n","\tSpan: 2015-05-03 / 2015-05-28\n","Test set 0\n","\tEvents: 14457\n","\tSessions: 4136\n","\tItems: 6506\n","\tSpan: 2015-05-28 / 2015-05-30 \n","\n","\n","Full data set 1\n","\tEvents: 1085763\n","\tSessions: 306919\n","\tItems: 49070\n","\tSpan: 2015-05-03T03:00:33+00:00 / 2015-09-18T02:58:58+00:00\n","Slice data set 1\n","\tEvents: 229891\n","\tSessions: 62631\n","\tItems: 33577\n","\tSpan: 2015-05-30 / 2015-06-24 / 2015-06-26\n","Train set 1\n","\tEvents: 212266\n","\tSessions: 57795\n","\tItems: 32529\n","\tSpan: 2015-05-30 / 2015-06-24\n","Test set 1\n","\tEvents: 15425\n","\tSessions: 4260\n","\tItems: 6801\n","\tSpan: 2015-06-24 / 2015-06-26 \n","\n","\n","Full data set 2\n","\tEvents: 1085763\n","\tSessions: 306919\n","\tItems: 49070\n","\tSpan: 2015-05-03T03:00:33+00:00 / 2015-09-18T02:58:58+00:00\n","Slice data set 2\n","\tEvents: 224835\n","\tSessions: 62257\n","\tItems: 34396\n","\tSpan: 2015-06-26 / 2015-07-21 / 2015-07-23\n","Train set 2\n","\tEvents: 207176\n","\tSessions: 57229\n","\tItems: 33453\n","\tSpan: 2015-06-26 / 2015-07-21\n","Test set 2\n","\tEvents: 15650\n","\tSessions: 4486\n","\tItems: 6937\n","\tSpan: 2015-07-21 / 2015-07-23 \n","\n","\n","Full data set 3\n","\tEvents: 1085763\n","\tSessions: 306919\n","\tItems: 49070\n","\tSpan: 2015-05-03T03:00:33+00:00 / 2015-09-18T02:58:58+00:00\n","Slice data set 3\n","\tEvents: 206774\n","\tSessions: 60383\n","\tItems: 33305\n","\tSpan: 2015-07-23 / 2015-08-17 / 2015-08-19\n","Train set 3\n","\tEvents: 191128\n","\tSessions: 55786\n","\tItems: 32263\n","\tSpan: 2015-07-23 / 2015-08-17\n","Test set 3\n","\tEvents: 13486\n","\tSessions: 3959\n","\tItems: 6065\n","\tSpan: 2015-08-17 / 2015-08-19 \n","\n","\n","Full data set 4\n","\tEvents: 1085763\n","\tSessions: 306919\n","\tItems: 49070\n","\tSpan: 2015-05-03T03:00:33+00:00 / 2015-09-18T02:58:58+00:00\n","Slice data set 4\n","\tEvents: 179452\n","\tSessions: 53595\n","\tItems: 30420\n","\tSpan: 2015-08-19 / 2015-09-13 / 2015-09-15\n","Train set 4\n","\tEvents: 166160\n","\tSessions: 49492\n","\tItems: 29543\n","\tSpan: 2015-08-19 / 2015-09-13\n","Test set 4\n","\tEvents: 11502\n","\tSessions: 3561\n","\tItems: 5329\n","\tSpan: 2015-09-13 / 2015-09-15 \n","\n","\n","END preproccessing 17.43916940689087 c 17.43917155265808 s\n"]}]},{"cell_type":"markdown","source":["## Limit using entropy rate estimation"],"metadata":{"id":"UQa-FNXIohQF"}},{"cell_type":"markdown","source":["### Convert the session data into a sequence format 
file"],"metadata":{"id":"HUHpOrO-ohNY"}},{"cell_type":"code","source":["import time\n","import os.path\n","import numpy as np\n","import pandas as pd\n","from _datetime import timezone, datetime\n","\n","\n","def load_data( path, file, rows_train=None, rows_test=None, slice_num=None, density=1, train_eval=False ):\n"," '''\n"," Loads a tuple of training and test set with the given parameters. \n"," Parameters\n"," --------\n"," path : string\n"," Base path to look in for the prepared data files\n"," file : string\n"," Prefix of the dataset you want to use.\n"," \"yoochoose-clicks-full\" loads yoochoose-clicks-full_train_full.txt and yoochoose-clicks-full_test.txt\n"," rows_train : int or None\n"," Number of rows to load from the training set file. \n"," This option will automatically filter the test set to only retain items included in the training set. \n"," rows_test : int or None\n"," Number of rows to load from the test set file. \n"," slice_num : \n"," Adds a slice index to the constructed file_path\n"," yoochoose-clicks-full_train_full.0.txt\n"," density : float\n"," Percentage of the sessions to randomly retain from the original data (0-1). \n"," The result is cached for the execution of multiple experiments. \n"," Returns\n"," --------\n"," out : tuple of pandas.DataFrame\n"," (train, test)\n"," \n"," '''\n"," \n"," print('START load data') \n"," st = time.time()\n"," sc = time.time()\n"," \n"," split = ''\n"," if( slice_num != None and isinstance(slice_num, int ) ):\n"," split = '.'+str(slice_num)\n"," \n"," train_appendix = '_train_full'\n"," test_appendix = '_test'\n"," if train_eval:\n"," train_appendix = '_train_tr'\n"," test_appendix = '_train_valid'\n"," \n"," density_appendix = ''\n"," if( density < 1 ): #create sample\n"," \n"," if not os.path.isfile( path + file + train_appendix + split + '.txt.'+str( density ) ) :\n"," \n"," train = pd.read_csv(path + file + train_appendix + split + '.txt', sep='\\t', dtype={'ItemId':np.int64})\n"," test = pd.read_csv(path + file + test_appendix + split + '.txt', sep='\\t', dtype={'ItemId':np.int64} )\n"," \n"," sessions = train.SessionId.unique() \n"," drop_n = round( len(sessions) - (len(sessions) * density) )\n"," drop_sessions = np.random.choice(sessions, drop_n, replace=False)\n"," train = train[ ~train.SessionId.isin( drop_sessions ) ]\n"," train.to_csv( path + file + train_appendix +split+'.txt.'+str(density), sep='\\t', index=False )\n"," \n"," sessions = test.SessionId.unique() \n"," drop_n = round( len(sessions) - (len(sessions) * density) )\n"," drop_sessions = np.random.choice(sessions, drop_n, replace=False)\n"," test = test[ ~test.SessionId.isin( drop_sessions ) ]\n"," test = test[np.in1d(test.ItemId, train.ItemId)]\n"," test.to_csv( path + file + test_appendix +split+'.txt.'+str(density), sep='\\t', index=False )\n"," \n"," density_appendix = '.'+str(density)\n"," \n"," if( rows_train == None ):\n"," train = pd.read_csv(path + file + train_appendix +split+'.txt'+density_appendix, sep='\\t', dtype={'ItemId':np.int64})\n"," else:\n"," train = pd.read_csv(path + file + train_appendix +split+'.txt'+density_appendix, sep='\\t', dtype={'ItemId':np.int64}, nrows=rows_train)\n"," session_lengths = train.groupby('SessionId').size()\n"," train = train[np.in1d(train.SessionId, session_lengths[ session_lengths>1 ].index)] \n"," \n"," if( rows_test == None ):\n"," test = pd.read_csv(path + file + test_appendix +split+'.txt'+density_appendix, sep='\\t', dtype={'ItemId':np.int64} )\n"," else :\n"," test = pd.read_csv(path + file + 
test_appendix +split+'.txt'+density_appendix, sep='\\t', dtype={'ItemId':np.int64}, nrows=rows_test )\n"," session_lengths = test.groupby('SessionId').size()\n"," test = test[np.in1d(test.SessionId, session_lengths[ session_lengths>1 ].index)]\n"," \n","# rows_train = 10000\n","# train = train.tail(10000)\n"," \n"," if( rows_train != None ):\n"," test = test[np.in1d(test.ItemId, train.ItemId)]\n"," session_lengths = test.groupby('SessionId').size()\n"," test = test[np.in1d(test.SessionId, session_lengths[ session_lengths>1 ].index)]\n"," \n"," #output\n"," data_start = datetime.fromtimestamp( train.Time.min(), timezone.utc )\n"," data_end = datetime.fromtimestamp( train.Time.max(), timezone.utc )\n"," \n"," print('Loaded train set\\n\\tEvents: {}\\n\\tSessions: {}\\n\\tItems: {}\\n\\tSpan: {} / {}\\n'.\n"," format( len(train), train.SessionId.nunique(), train.ItemId.nunique(), data_start.date().isoformat(), data_end.date().isoformat() ) )\n"," \n"," data_start = datetime.fromtimestamp( test.Time.min(), timezone.utc )\n"," data_end = datetime.fromtimestamp( test.Time.max(), timezone.utc )\n"," \n"," print('Loaded test set\\n\\tEvents: {}\\n\\tSessions: {}\\n\\tItems: {}\\n\\tSpan: {} / {}\\n'.\n"," format( len(test), test.SessionId.nunique(), test.ItemId.nunique(), data_start.date().isoformat(), data_end.date().isoformat() ) )\n"," \n"," print( 'END load data ', (time.time()-sc), 'c / ', (time.time()-st), 's' )\n"," \n"," return (train, test)\n","\n","\n","def load_buys( path, file ):\n"," '''\n"," Load all buy events from the youchoose file, retains events fitting in the given test set and merges both data sets into one\n"," Parameters\n"," --------\n"," path : string\n"," Base path to look in for the prepared data files\n"," file : string\n"," Prefix of the dataset you want to use.\n"," \"yoochoose-clicks-full\" loads yoochoose-clicks-full_train_full.txt and yoochoose-clicks-full_test.txt\n"," \n"," Returns\n"," --------\n"," out : pandas.DataFrame\n"," test with buys\n"," \n"," '''\n"," \n"," print('START load buys') \n"," st = time.time()\n"," sc = time.time()\n"," \n"," #load csv\n"," buys = pd.read_csv(path + file + '.txt', sep='\\t', dtype={'ItemId':np.int64})\n"," \n"," print( 'END load buys ', (time.time()-sc), 'c / ', (time.time()-st), 's' )\n"," \n"," return buys"],"metadata":{"id":"IrQHjidKohHi"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["def dump_sequence(data_path, file_prefix, out_fn, density=1, slic=0):\n"," \"\"\"\n"," Convert training/testing slices into a sequence format\n"," suitable for entropy rate estimation\n"," \"\"\"\n","\n"," train, test = load_data(data_path, file_prefix,\n"," rows_train=None, rows_test=None, density=density,\n"," slice_num=slic)\n","\n"," # append all\n"," all_data = train.append(test)\n","\n"," # sort by sequence, then timestamp\n"," groupby = all_data.groupby(\"SessionId\")\n"," with open(out_fn, \"w\") as f:\n"," for session_id, session in groupby:\n"," item_ids = [item_id for\n"," item_id in session.sort_values(\"Time\")[\"ItemId\"]]\n"," for item_id in item_ids:\n"," f.write(\"{}\\n\".format(item_id))\n"," f.write(\"-1\\n\")"],"metadata":{"id":"eFg1Ml3YohE0"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["data_path = './retailrocket/slices/'\n","file_prefix = 'events'\n","output_file = './retailrocket/seq/s0.txt'\n","d = 1 # downsample the input data (0.1 - use only 10%% of input)\n","s = 0 # slice number, 0-4\n","\n","!mkdir ./retailrocket/seq\n","dump_sequence(data_path, file_prefix, 
output_file, d, s)"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"svYQ5fPdpU4h","executionInfo":{"status":"ok","timestamp":1639128270265,"user_tz":-330,"elapsed":29672,"user":{"displayName":"Sparsh Agarwal","photoUrl":"https://lh3.googleusercontent.com/a/default-user=s64","userId":"13037694610922482904"}},"outputId":"10cf5f56-e767-4e6a-b8d4-1fdca2543664"},"execution_count":null,"outputs":[{"output_type":"stream","name":"stdout","text":["START load data\n","Loaded train set\n","\tEvents: 213660\n","\tSessions: 59110\n","\tItems: 32052\n","\tSpan: 2015-05-03 / 2015-05-28\n","\n","Loaded test set\n","\tEvents: 14457\n","\tSessions: 4136\n","\tItems: 6506\n","\tSpan: 2015-05-28 / 2015-05-30\n","\n","END load data 0.10811591148376465 c / 0.10811710357666016 s\n"]}]},{"cell_type":"markdown","source":["### Entropy rate estimation"],"metadata":{"id":"sKYOt42XqFPS"}},{"cell_type":"code","source":["import numpy as np\n","from collections import defaultdict\n","\n","def calc_entropy2(in_fn):\n"," \"\"\"\n"," Entropy rate estimation for a sequence\n"," input: file with each sequence element (integer) on its own row\n"," \"\"\"\n"," with open(in_fn) as f:\n"," events = [int(l.strip()) for l in f]\n","\n"," # calculate Lempel-Ziv estimate of entropy\n"," lambda_sum = 0\n"," seq1 = set() # single item sequences\n"," seq2 = set() # two-item sequences\n"," seq3 = defaultdict(list) # three-item sequences index\n","\n"," n = len(events)\n"," print(in_fn, n)\n"," timestep = int(n / 10) + 1\n"," for i in range(n):\n"," k_max = 0\n"," # single item\n"," if events[i] in seq1:\n"," k_max = 1\n"," # two items\n"," if i + 1 < n and tuple(events[i:i+2]) in seq2:\n"," k_max = 2\n"," # three or more\n"," if i + 2 < n:\n"," for subseq_start in seq3[tuple(events[i:i+3])]:\n"," k = 3\n"," while subseq_start + k < i and i + k < n:\n"," if events[subseq_start + k] != events[i + k]:\n"," break\n"," k += 1\n"," k_max = max(k, k_max)\n","\n"," lambda_sum += (k_max + 1) # as in Xu, et al. 
(2019)\n"," #print(i, ev, k_max)\n","\n"," # update index\n"," seq1.add(events[i])\n"," if i > 0:\n"," seq2.add(tuple(events[i-1:i+1]))\n"," if i > 1:\n"," seq3[tuple(events[i-2:i+1])].append(i - 2)\n","\n"," if i % timestep == 0 and i > 0:\n"," print(i, \"done\")\n","\n"," S = (n / lambda_sum) * np.log2(n)\n"," print(\"S:\", S)\n"," print(\"m (for \\Pi^max equation):\", len(seq1))"],"metadata":{"id":"Iv7VGm-_qFMY"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["input_file = './retailrocket/seq/s0.txt'\n","\n","calc_entropy2(input_file)"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"RjW1htoSqOjZ","executionInfo":{"status":"ok","timestamp":1639128345079,"user_tz":-330,"elapsed":1715,"user":{"displayName":"Sparsh Agarwal","photoUrl":"https://lh3.googleusercontent.com/a/default-user=s64","userId":"13037694610922482904"}},"outputId":"e27aafbd-ff61-435f-8f1e-1330c9e9d584"},"execution_count":null,"outputs":[{"output_type":"stream","name":"stdout","text":["./retailrocket/seq/s0.txt 291363\n","29137 done\n","58274 done\n","87411 done\n","116548 done\n","145685 done\n","174822 done\n","203959 done\n","233096 done\n","262233 done\n","S: 7.136608275066677\n","m (for \\Pi^max equation): 32053\n"]}]},{"cell_type":"markdown","source":["The predictability limit can be computed using the entropy rate estimate S and the unique event count m."],"metadata":{"id":"XTIJYG-wqFFM"}},{"cell_type":"markdown","source":["## Limit for some algorithms"],"metadata":{"id":"Pe9rmiXDqFAt"}},{"cell_type":"markdown","source":["Calculate co-occurrence of the item to predict (in a recommendation accuracy test) and the current item (given to the recommender as an input) in the training data."],"metadata":{"id":"m_VZDP4JqxXI"}},{"cell_type":"code","source":["def test_all(data_path, file_prefix, density=1, slic=[0]):\n"," all_stats = defaultdict(int)\n"," for i in slic:\n"," train, test = load_data(data_path, file_prefix,\n"," rows_train=None, rows_test=None, density=density,\n"," slice_num=i)\n"," \n"," s, i2s = load_sessions(train)\n"," print(data_path, file_prefix, i)\n","\n"," stats = test_reachability(s, i2s, test)\n"," for k, v in stats.items():\n"," all_stats[k] += v\n"," for k, v in all_stats.items():\n"," print(k, v)"],"metadata":{"id":"4NWJY9Avq9gJ"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["def test_reachability(sessions, item2session, data, max_span=10):\n"," \"\"\"Item co-occurrence in sessions\"\"\"\n"," stats = {\"r_cnt\" : 0,\n"," \"cnt_next\" : 0,\n"," \"cnt_fwd10\" : 0,\n"," \"cnt_anywhere\" : 0,\n"," \"cnt_anywhere_sess\" : 0}\n","\n"," groupby = data.groupby(\"SessionId\")\n"," for session_id, session in groupby:\n"," item_ids = [item_id for\n"," item_id in session.sort_values(\"Time\")[\"ItemId\"]]\n","\n"," l = len(item_ids)\n"," for i in range(l - 1):\n"," # step 1: calculate relative to current item\n"," # MC cnt_next\n"," # SR, windowed NB cnt_fwd10\n"," # AR cnt_anywhere\n"," item_id = item_ids[i]\n"," target_id = item_ids[i + 1]\n","\n"," next_found = 0\n"," fwd10_found = 0\n"," any_found = 0\n"," sess_found = 0\n"," seen_sessions = set()\n","\n"," # loop through all sessions\n"," for train_sess_id in item2session[item_id]:\n"," seen_sessions.add(train_sess_id)\n"," train_sess = sessions[train_sess_id]\n"," last_item = None\n"," for i, train_item in enumerate(train_sess):\n"," if train_item == target_id:\n"," any_found = 1\n"," sess_found = 1\n"," if last_item == item_id:\n"," next_found = 1\n"," fwd10_found = 1\n"," break\n"," elif not 
fwd10_found and i > 1 and item_id in train_sess[max(0, i - max_span):i - 1]:\n"," fwd10_found = 1\n"," last_item = train_item\n","\n"," if next_found:\n"," break\n"," # otherwise need to keep searching other sessions\n","\n"," # step 2: search using the remainder of the items seen so far\n"," # NB cnt_anywhere_sess\n"," if not sess_found:\n"," sess_so_far = set(item_ids[:i])\n"," for item_id in sess_so_far:\n"," for train_sess_id in item2session[item_id]:\n"," if train_sess_id in seen_sessions:\n"," continue\n"," seen_sessions.add(train_sess_id)\n","\n"," train_sess = sessions[train_sess_id]\n"," last_item = None\n"," for i, train_item in enumerate(train_sess):\n"," if train_item == target_id:\n"," sess_found = 1\n"," break\n","\n"," # summarize results\n"," stats[\"r_cnt\"] += 1\n"," stats[\"cnt_next\"] += next_found\n"," stats[\"cnt_fwd10\"] += fwd10_found\n"," stats[\"cnt_anywhere\"] += any_found\n"," stats[\"cnt_anywhere_sess\"] += sess_found\n","\n"," return stats"],"metadata":{"id":"0uj-l1gArBvG"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["def test_forward_backward(sessions, item2session, data):\n"," \"\"\"Statistics of whether the item to predict occurs\n"," before or after the current item (when co-occurring in a session)\n"," \"\"\"\n"," stats = {\"f_cnt\" : 0,\n"," \"cnt_bwd\" : 0,\n"," \"cnt_fwd\" : 0,\n"," \"cnt_both\" : 0}\n","\n"," groupby = data.groupby(\"SessionId\")\n"," for session_id, session in groupby:\n"," item_ids = [item_id for\n"," item_id in session.sort_values(\"Time\")[\"ItemId\"]]\n","\n"," l = len(item_ids)\n"," for i in range(l - 1):\n"," item_id = item_ids[i]\n"," target_id = item_ids[i + 1]\n"," if item_id == target_id:\n"," continue\n","\n"," common_sessions = set(item2session[item_id]).intersection(\n"," set(item2session[target_id]))\n","\n"," bwd = 0\n"," fwd = 0\n"," both = 0\n","\n"," # loop through all sessions\n"," for train_sess_id in common_sessions:\n"," train_sess = sessions[train_sess_id]\n"," item_pos = []\n"," target_pos = []\n"," for i in range(len(train_sess)):\n"," if train_sess[i] == item_id:\n"," item_pos.append(i)\n"," elif train_sess[i] == target_id:\n"," target_pos.append(i)\n","\n"," b = f = 0\n"," if min(target_pos) < max(item_pos):\n"," b = 1\n"," if min(item_pos) < max(target_pos):\n"," f = 1\n"," bwd += b\n"," fwd += f\n"," if b == f:\n"," both += 1\n","\n"," # summarize results\n"," stats[\"f_cnt\"] += len(common_sessions)\n"," stats[\"cnt_bwd\"] += bwd\n"," stats[\"cnt_fwd\"] += fwd\n"," stats[\"cnt_both\"] += both\n","\n"," return stats"],"metadata":{"id":"69fghpYJrDsS"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["def test_out_edges(sessions, item2session):\n"," \"\"\"Count outgoing edges in an item-to-item graph\n"," (edge is one item following another in a session)\n"," \"\"\"\n"," stats = {\"e_cnt\" : 0,\n"," \"cnt_u20\" : 0,\n"," \"cnt_u10\" : 0,\n"," \"cnt_u05\" : 0}\n","\n"," out_cnt = defaultdict(set)\n"," for session_id, item_ids in sessions.items():\n","\n"," last_item_id = None\n"," for item_id in item_ids:\n"," if last_item_id is not None:\n"," out_cnt[last_item_id].add(item_id)\n"," last_item_id = item_id\n","\n"," for item_id, out_edges in out_cnt.items():\n"," stats[\"e_cnt\"] += 1\n"," l = len(out_edges)\n"," if l <= 20:\n"," stats[\"cnt_u20\"] += 1\n"," if l <= 10:\n"," stats[\"cnt_u10\"] += 1\n"," if l <= 5:\n"," stats[\"cnt_u05\"] += 1\n","\n"," return 
stats"],"metadata":{"id":"P0nzL4xgrE_d"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["def load_sessions(data):\n"," \"\"\"Build a dictionary of sessions and a lookup map for\n"," finding which sessions an item belongs to\n"," \"\"\"\n"," sessions = defaultdict(list)\n"," item2session = defaultdict(list)\n","\n"," groupby = data.groupby(\"SessionId\")\n"," for session_id, session in groupby:\n"," item_ids = [item_id for\n"," item_id in session.sort_values(\"Time\")[\"ItemId\"]]\n"," sessions[session_id] = item_ids\n","\n"," for item_id in item_ids:\n"," item2session[item_id].append(session_id)\n","\n"," return sessions, item2session"],"metadata":{"id":"8if8VdzErGAa"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["d = 1 # downsample the input data (0.1 - use only 10%% of input)\n","data_path = './retailrocket/slices/'\n","file_prefix = 'events'\n","\n","test_all(data_path, file_prefix, d, [0,1,2,3,4])"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"QA8t130QrHCe","executionInfo":{"status":"ok","timestamp":1639128792812,"user_tz":-330,"elapsed":143166,"user":{"displayName":"Sparsh Agarwal","photoUrl":"https://lh3.googleusercontent.com/a/default-user=s64","userId":"13037694610922482904"}},"outputId":"17bf1ff2-3285-4f72-b5c5-1840d3be7f2f"},"execution_count":null,"outputs":[{"output_type":"stream","name":"stdout","text":["START load data\n","Loaded train set\n","\tEvents: 213660\n","\tSessions: 59110\n","\tItems: 32052\n","\tSpan: 2015-05-03 / 2015-05-28\n","\n","Loaded test set\n","\tEvents: 14457\n","\tSessions: 4136\n","\tItems: 6506\n","\tSpan: 2015-05-28 / 2015-05-30\n","\n","END load data 0.10039901733398438 c / 0.10040020942687988 s\n","./retailrocket/slices/ events 0\n","START load data\n","Loaded train set\n","\tEvents: 212266\n","\tSessions: 57795\n","\tItems: 32529\n","\tSpan: 2015-05-30 / 2015-06-24\n","\n","Loaded test set\n","\tEvents: 15425\n","\tSessions: 4260\n","\tItems: 6801\n","\tSpan: 2015-06-24 / 2015-06-26\n","\n","END load data 0.08721494674682617 c / 0.08721661567687988 s\n","./retailrocket/slices/ events 1\n","START load data\n","Loaded train set\n","\tEvents: 207176\n","\tSessions: 57229\n","\tItems: 33453\n","\tSpan: 2015-06-26 / 2015-07-21\n","\n","Loaded test set\n","\tEvents: 15650\n","\tSessions: 4486\n","\tItems: 6937\n","\tSpan: 2015-07-21 / 2015-07-23\n","\n","END load data 0.08588290214538574 c / 0.08588480949401855 s\n","./retailrocket/slices/ events 2\n","START load data\n","Loaded train set\n","\tEvents: 191128\n","\tSessions: 55786\n","\tItems: 32263\n","\tSpan: 2015-07-23 / 2015-08-17\n","\n","Loaded test set\n","\tEvents: 13486\n","\tSessions: 3959\n","\tItems: 6065\n","\tSpan: 2015-08-17 / 2015-08-19\n","\n","END load data 0.08804202079772949 c / 0.0880436897277832 s\n","./retailrocket/slices/ events 3\n","START load data\n","Loaded train set\n","\tEvents: 166160\n","\tSessions: 49492\n","\tItems: 29543\n","\tSpan: 2015-08-19 / 2015-09-13\n","\n","Loaded test set\n","\tEvents: 11502\n","\tSessions: 3561\n","\tItems: 5329\n","\tSpan: 2015-09-13 / 2015-09-15\n","\n","END load data 0.07229804992675781 c / 0.07229948043823242 s\n","./retailrocket/slices/ events 4\n","r_cnt 50118\n","cnt_next 18197\n","cnt_fwd10 22035\n","cnt_anywhere 28417\n","cnt_anywhere_sess 40052\n"]}]},{"cell_type":"markdown","source":["`\"r_cnt\"` in results is the total number of test cases examined.\n","\n","Interpreting the results:\n","\n","| Key | Item to predict appears | Applies to algorithm |\n","| ------------- | 
{"cell_type":"markdown","metadata":{"id":"g-iyxuibDWgT"},"source":["---"]},{"cell_type":"code","metadata":{"id":"xgr27_gWDWgV"},"source":["# !apt-get -qq install tree\n","# !rm -r sample_data"],"execution_count":null,"outputs":[]},{"cell_type":"code","metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"9ivY8TNnDWgW","executionInfo":{"status":"ok","timestamp":1638638389489,"user_tz":-330,"elapsed":431,"user":{"displayName":"Sparsh Agarwal","photoUrl":"https://lh3.googleusercontent.com/a/default-user=s64","userId":"13037694610922482904"}},"outputId":"88120d85-2201-4e66-ec9a-6827e870d1ca"},"source":["# !tree -h --du ."],"execution_count":null,"outputs":[{"output_type":"stream","name":"stdout","text":[".\n","├── [217M] retailrocket\n","│   ├── [ 90M] events.csv\n","│   ├── [ 60M] prepared_window\n","│   │   ├── [ 12M] events.0.hdf\n","│   │   ├── [ 12M] events.1.hdf\n","│   │   ├── [ 12M] events.2.hdf\n","│   │   ├── [ 11M] events.3.hdf\n","│   │   └── [ 11M] events.4.hdf\n","│   └── [ 67M] slices\n","│   ├── [115K] events_test.txt\n","│   ├── [ 33M] events_train_full.txt\n","│   ├── [ 33M] events_train_tr.txt\n","│   └── [132K] events_train_valid.txt\n","└── [ 32M] retailrocket.zip\n","\n"," 249M used in 3 directories, 11 files\n"]}]},{"cell_type":"code","metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"bKMlo1SfDWgX","executionInfo":{"status":"ok","timestamp":1638638417182,"user_tz":-330,"elapsed":3625,"user":{"displayName":"Sparsh Agarwal","photoUrl":"https://lh3.googleusercontent.com/a/default-user=s64","userId":"13037694610922482904"}},"outputId":"7ee406f5-fcd8-472e-944c-e8029b80553a"},"source":["# !pip install -q watermark\n","# %reload_ext watermark\n","# %watermark -a \"Sparsh A.\" -m -iv -u -t -d"],"execution_count":null,"outputs":[{"output_type":"stream","name":"stdout","text":["Author: Sparsh A.\n","\n","Last updated: 2021-12-04 17:20:19\n","\n","Compiler : GCC 7.5.0\n","OS : Linux\n","Release : 5.4.104+\n","Machine : x86_64\n","Processor : x86_64\n","CPU cores : 2\n","Architecture: 64bit\n","\n","IPython: 5.5.0\n","pandas : 1.1.5\n","numpy : 1.19.5\n","sys : 3.7.12 (default, Sep 10 2021, 00:21:48) \n","[GCC 7.5.0]\n","\n"]}]},{"cell_type":"markdown","metadata":{"id":"GWg5v7TbDWgY"},"source":["---"]},{"cell_type":"markdown","metadata":{"id":"yPjQ9oQtDWgZ"},"source":["**END**"]}]}