{ "cells": [ { "cell_type": "raw", "metadata": {}, "source": [ "%env MKL_NUM_THREADS=16\n", "%env OMP_NUM_THREADS=16" ] }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [], "source": [ "import numpy as np\n", "import pandas as pd\n", "from ipypb import track\n", "\n", "from polara.evaluation import evaluation_engine as ee\n", "from polara.evaluation.pipelines import find_optimal_svd_rank\n", "from polara import (SVDModel,\n", " PopularityModel,\n", " RandomModel)\n", "from polara.recommender.hybrid.models import SimilarityAggregation\n", "from polara.recommender.coldstart.models import (SVDModelItemColdStart,\n", " RandomModelItemColdStart,\n", " PopularityModelItemColdStart,\n", " SimilarityAggregationItemColdStart)\n", "\n", "from data_preprocessing import (get_amazon_data,\n", " get_similarity_data,\n", " prepare_data_model,\n", " prepare_cold_start_data_model)\n", "from utils import (report_results, save_results,\n", " apply_config, print_data_stats,\n", " save_training_time, save_cv_training_time)\n", "%matplotlib inline" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [], "source": [ "from polara.recommender import defaults\n", "defaults.memory_hard_limit = 15 # allowed memory usage during recommendations generation" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [], "source": [ "seed = 42" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [], "source": [ "experiment_name = 'baseline'" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# Experiment setup" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [], "source": [ "data_labels = ['AMZe', 'AMZvg']" ] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [], "source": [ "ranks_grid = [1, 5, 10, 15, 20, 30, 50, 60, 75, 100, 125, 150, 200, 250, 300,\n", " 350, 400, 500, 750, 1000, 1250, 1500, 1750, 2000, 2500, 3000]\n", "svd_ranks = {'AMZe': ranks_grid,\n", " 'AMZvg': ranks_grid\n", " }" ] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [], "source": [ "topk_values = [1, 3, 10, 20, 30]" ] }, { "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [], "source": [ "target_metric = 'mrr'" ] }, { "cell_type": "code", "execution_count": 9, "metadata": {}, "outputs": [], "source": [ "data_dict = dict.fromkeys(data_labels)\n", "meta_dict = dict.fromkeys(data_labels)\n", "similarities = dict.fromkeys(data_labels)\n", "sim_indices = dict.fromkeys(data_labels)\n", "feature_idx = dict.fromkeys(data_labels)" ] }, { "cell_type": "code", "execution_count": 10, "metadata": {}, "outputs": [], "source": [ "all_data = [data_dict, similarities, sim_indices, meta_dict]" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Amazon Electronics" ] }, { "cell_type": "code", "execution_count": 11, "metadata": {}, "outputs": [], "source": [ "lbl = 'AMZe'" ] }, { "cell_type": "code", "execution_count": 13, "metadata": {}, "outputs": [], "source": [ "data_dict[lbl], meta_dict[lbl] = get_amazon_data('../datasets/amazon/ratings_Electronics.csv',\n", " meta_path='../datasets/amazon/meta/meta_Electronics.json.gz',\n", " implicit=True,\n", " pcore=5,\n", " filter_no_meta=True,\n", " flat_categories=True) # take only bottom level categories" ] }, { "cell_type": "code", "execution_count": 14, "metadata": {}, "outputs": [], "source": [ "similarities[lbl], sim_indices[lbl], feature_idx[lbl] = get_similarity_data(meta_dict[lbl])" ] }, { "cell_type": 
"code", "execution_count": 15, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "0.0" ] }, "execution_count": 15, "metadata": {}, "output_type": "execute_result" } ], "source": [ "(meta_dict[lbl].applymap(len).sum(axis=1)==0).mean()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Amazon Video Games" ] }, { "cell_type": "code", "execution_count": 16, "metadata": {}, "outputs": [], "source": [ "lbl = 'AMZvg'" ] }, { "cell_type": "code", "execution_count": 17, "metadata": {}, "outputs": [], "source": [ "data_dict[lbl], meta_dict[lbl] = get_amazon_data('../datasets/amazon/ratings_Video_Games.csv',\n", " meta_path='../datasets/amazon/meta/meta_Video_Games.json.gz',\n", " implicit=True,\n", " pcore=5,\n", " filter_data={'categories': ['Games']}, # filter uniformative category\n", " filter_no_meta=True,\n", " flat_categories=True) # take only bottom level categories" ] }, { "cell_type": "code", "execution_count": 18, "metadata": {}, "outputs": [], "source": [ "similarities[lbl], sim_indices[lbl], feature_idx[lbl] = get_similarity_data(meta_dict[lbl])" ] }, { "cell_type": "code", "execution_count": 19, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "0.0" ] }, "execution_count": 19, "metadata": {}, "output_type": "execute_result" } ], "source": [ "(meta_dict[lbl].applymap(len).sum(axis=1)==0).mean()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Data stats" ] }, { "cell_type": "code", "execution_count": 20, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "AMZe\n", "{'userid': 124895, 'asin': 44843}\n", "density 0.019153791836615672\n", "similarity matrix density 1.1054998336712965\n", "AMZvg\n", "{'userid': 14251, 'asin': 6858}\n", "density 0.13281340440589384\n", "similarity matrix density 9.081814734274188\n" ] } ], "source": [ "print_data_stats(data_labels, all_data)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# Standard experiment" ] }, { "cell_type": "code", "execution_count": 21, "metadata": {}, "outputs": [], "source": [ "def prepare_recommender_models(data_label, data_models, config):\n", " data_model = data_models[data_label]\n", " models = [SVDModel(data_model),\n", " SimilarityAggregation(data_model),\n", " PopularityModel(data_model),\n", " RandomModel(data_model, seed=seed)]\n", " apply_config(models, config, data_label)\n", " return models\n", "\n", "\n", "def fine_tune_svd(model, ranks, label, record_time=False):\n", " best_svd_rank, svd_scores = find_optimal_svd_rank(model, ranks, target_metric,\n", " return_scores=True,\n", " iterator=lambda x: track(x, label=f'{label} ranks'))\n", " model_config = {model.method: {'rank': best_svd_rank}}\n", " model_scores = {model.method: svd_scores}\n", " try:\n", " if record_time:\n", " save_training_time(experiment_name, model, pd.Index([max(ranks)], name='rank'), label)\n", " finally:\n", " return model_config, model_scores" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## tuning" ] }, { "cell_type": "code", "execution_count": 22, "metadata": {}, "outputs": [], "source": [ "config = {}\n", "scores = {}\n", "data_models = {}" ] }, { "cell_type": "code", "execution_count": 23, "metadata": {}, "outputs": [ { "data": { "text/html": [ "