{"nbformat":4,"nbformat_minor":0,"metadata":{"colab":{"name":"2022-01-22-matn-yelp.ipynb","provenance":[{"file_id":"https://github.com/recohut/nbs/blob/main/raw/T161774%20%7C%20MATN%20on%20Yelp%20in%20Tensorflow.ipynb","timestamp":1644661989586}],"collapsed_sections":[],"toc_visible":true,"authorship_tag":"ABX9TyOz5D7JJ6gHokxWC7U1FIXC"},"kernelspec":{"name":"python3","display_name":"Python 3"},"language_info":{"name":"python"},"accelerator":"GPU"},"cells":[{"cell_type":"markdown","metadata":{"id":"B6APV93JvrV0"},"source":["# MATN on Yelp in Tensorflow\n","> Multiplex Behavioral Relation Learning for Recommendation via Memory Augmented Transformer Network"]},{"cell_type":"markdown","metadata":{"id":"2hoHcDfpumh7"},"source":["## Setup"]},{"cell_type":"code","metadata":{"id":"SwuiBSWoqctn"},"source":["!git clone https://github.com/akaxlh/MATN.git"],"execution_count":null,"outputs":[]},{"cell_type":"code","metadata":{"id":"mYENSPmkqdsP"},"source":["!apt-get install tree"],"execution_count":null,"outputs":[]},{"cell_type":"code","metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"mYqs8GfEqf6r","executionInfo":{"status":"ok","timestamp":1634112044005,"user_tz":-330,"elapsed":614,"user":{"displayName":"Sparsh Agarwal","photoUrl":"https://lh3.googleusercontent.com/a/default-user=s64","userId":"13037694610922482904"}},"outputId":"f6d1527a-0efc-4b08-ce0a-c240ef5a5b70"},"source":["!tree --du -h ./MATN"],"execution_count":null,"outputs":[{"output_type":"stream","name":"stdout","text":["./MATN\n","├── [2.6K] DataHandler.py\n","├── [102M] Datasets\n","│   ├── [ 79M] MultiInt-ML10M\n","│   │   ├── [ 40M] buy\n","│   │   │   ├── [ 1] 1\n","│   │   │   ├── [ 11M] trn_neg\n","│   │   │   ├── [ 12M] trn_neutral.rar\n","│   │   │   ├── [ 16M] trn_pos.rar\n","│   │   │   └── [ 84K] tst_int\n","│   │   └── [ 40M] click\n","│   │   ├── [ 1] 1\n","│   │   ├── [ 11M] trn_neg\n","│   │   ├── [ 12M] trn_neutral.rar\n","│   │   ├── [ 16M] trn_pos.rar\n","│   │   └── [ 84K] 
tst_int\n","│   └── [ 23M] yelp\n","│   ├── [ 11M] buy\n","│   │   ├── [ 2] tem\n","│   │   ├── [1.6M] trn_neg\n","│   │   ├── [1.9M] trn_neutral\n","│   │   ├── [5.2M] trn_pos\n","│   │   ├── [2.3M] trn_tip\n","│   │   └── [273K] tst_int\n","│   ├── [ 11M] click\n","│   │   ├── [ 1] 1\n","│   │   ├── [1.6M] trn_neg\n","│   │   ├── [1.9M] trn_neutral\n","│   │   ├── [5.3M] trn_pos\n","│   │   ├── [2.2M] trn_tip\n","│   │   └── [273K] tst_int\n","│   └── [ 2] placeholder\n","├── [9.8K] labcode.py\n","├── [1.6K] Params.py\n","└── [ 10K] Utils\n"," ├── [5.0K] NNLayers.py\n"," ├── [ 5] placeholder\n"," └── [1.1K] TimeLogger.py\n","\n"," 102M used in 8 directories, 29 files\n"]}]},{"cell_type":"code","metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"kb2v5YbGq-tO","executionInfo":{"status":"ok","timestamp":1634113312265,"user_tz":-330,"elapsed":1031,"user":{"displayName":"Sparsh Agarwal","photoUrl":"https://lh3.googleusercontent.com/a/default-user=s64","userId":"13037694610922482904"}},"outputId":"fbcb80c3-1df9-4efd-e28c-054d02671cbd"},"source":["%tensorflow_version 1.x"],"execution_count":null,"outputs":[{"output_type":"stream","name":"stdout","text":["TensorFlow 1.x selected.\n"]}]},{"cell_type":"code","metadata":{"id":"6F9nZ0BDvD_e"},"source":["!mkdir -p History Models"],"execution_count":null,"outputs":[]},{"cell_type":"code","metadata":{"id":"fRo1wpS-uo1R"},"source":["import pickle\n","import argparse\n","import pickle\n","import numpy as np\n","import datetime\n","from scipy.sparse import csr_matrix\n","\n","import tensorflow as tf\n","from tensorflow.core.protobuf import config_pb2\n","from tensorflow.contrib.layers import xavier_initializer"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"qjOKVmpErJU0"},"source":["## NN Layers"]},{"cell_type":"code","metadata":{"id":"WwWuxkJ0q8P1"},"source":["paramId = 0\n","biasDefault = False\n","params = {}\n","regParams = {}\n","ita = 0.2\n","leaky = 0.01\n","\n","def 
getParamId():\n","\tglobal paramId\n","\tparamId += 1\n","\treturn paramId\n","\n","def setIta(ITA):\n","\tita = ITA\n","\n","def setBiasDefault(val):\n","\tglobal biasDefault\n","\tbiasDefault = val\n","\n","def getParam(name):\n","\treturn params[name]\n","\n","def addReg(name, param):\n","\tglobal regParams\n","\tif name not in regParams:\n","\t\tregParams[name] = param\n","\t# else:\n","\t# \tprint('ERROR: Parameter already exists')\n","\n","def addParam(name, param):\n","\tglobal params\n","\tif name not in params:\n","\t\tparams[name] = param\n","\n","def defineParam(name, shape, dtype=tf.float32, reg=False,\n","\tinitializer='xavier', trainable=True):\n","\tglobal params\n","\tglobal regParams\n","\tif initializer == 'xavier':\n","\t\tret = tf.get_variable(name=name, dtype=dtype, shape=shape,\n","\t\t\tinitializer=xavier_initializer(dtype=tf.float32),\n","\t\t\ttrainable=trainable)\n","\telif initializer == 'trunc_normal':\n","\t\tret = tf.get_variable(name=name, initializer=tf.random.truncated_normal(shape=[int(shape[0]), shape[1]], mean=0.0, stddev=0.03, dtype=dtype))\n","\telif initializer == 'zeros':\n","\t\tret = tf.get_variable(name=name, dtype=dtype,\n","\t\t\tinitializer=tf.zeros(shape=shape, dtype=tf.float32),\n","\t\t\ttrainable=trainable)\n","\telif initializer == 'ones':\n","\t\tret = tf.get_variable(name=name, dtype=dtype, initializer=tf.ones(shape=shape, dtype=tf.float32), trainable=trainable)\n","\telif not isinstance(initializer, str):\n","\t\tret = tf.get_variable(name=name, dtype=dtype,\n","\t\t\tinitializer=initializer, trainable=trainable)\n","\telse:\n","\t\tprint('ERROR: Unrecognized initializer')\n","\t\texit()\n","\tparams[name] = ret\n","\tif reg:\n","\t\tregParams[name] = ret\n","\treturn ret\n","\n","def getOrDefineParam(name, shape, dtype=tf.float32, reg=False,\n","\tinitializer='xavier', trainable=True):\n","\tglobal params\n","\tglobal regParams\n","\tif name in params:\n","\t\tif reg and name not in 
regParams:\n","\t\t\tregParams[name] = params[name]\n","\t\treturn params[name]\n","\treturn defineParam(name, shape, dtype, reg, initializer, trainable)\n","\n","def BN(inp, name=None):\n","\tglobal ita\n","\tdim = inp.get_shape()[1]\n","\tname = 'defaultParamName%d'%getParamId()\n","\tscale = tf.Variable(tf.ones([dim]))\n","\tshift = tf.Variable(tf.zeros([dim]))\n","\tfcMean, fcVar = tf.nn.moments(inp, axes=[0])\n","\tema = tf.train.ExponentialMovingAverage(decay=0.5)\n","\temaApplyOp = ema.apply([fcMean, fcVar])\n","\twith tf.control_dependencies([emaApplyOp]):\n","\t\tmean = tf.identity(fcMean)\n","\t\tvar = tf.identity(fcVar)\n","\tret = tf.nn.batch_normalization(inp, mean, var, shift,\n","\t\tscale, 1e-8)\n","\treturn ret\n","\n","def FC(inp, outDim, name=None, useBias=False, activation=None,\n","\treg=False, useBN=False, dropout=None, initializer='xavier', noDrop=False):\n","\tglobal params\n","\tglobal regParams\n","\tglobal leaky\n","\t# useBias = biasDefault\n","\t# if not noDrop:\n","\t# \tinp = tf.nn.dropout(inp, rate=0.001)\n","\tinDim = inp.get_shape()[1]\n","\ttemName = name if name!=None else 'defaultParamName%d'%getParamId()\n","\tW = getOrDefineParam(temName, [inDim, outDim], reg=reg, initializer=initializer)\n","\tif dropout != None:\n","\t\tret = tf.nn.dropout(inp, rate=dropout) @ W\n","\telse:\n","\t\tret = inp @ W\n","\tif useBias:\n","\t\ttemBiasName = temName + 'Bias'\n","\t\tbias = getOrDefineParam(temBiasName, outDim, reg=False, initializer='zeros')\n","\t\tret = ret + bias\n","\tif useBN:\n","\t\tret = BN(ret)\n","\tif activation != None:\n","\t\tret = Activate(ret, activation)\n","\treturn ret\n","\n","def Bias(data, name=None, reg=False):\n","\tinDim = data.get_shape()[-1]\n","\ttemName = name if name!=None else 'defaultParamName%d'%getParamId()\n","\ttemBiasName = temName + 'Bias'\n","\tbias = getOrDefineParam(temBiasName, inDim, reg=False, initializer='zeros')\n","\tif reg:\n","\t\tregParams[temBiasName] = bias\n","\treturn data + 
bias\n","\n","def ActivateHelp(data, method):\n","\tif method == 'relu':\n","\t\tret = tf.nn.relu(data)\n","\telif method == 'sigmoid':\n","\t\tret = tf.nn.sigmoid(data)\n","\telif method == 'tanh':\n","\t\tret = tf.nn.tanh(data)\n","\telif method == 'softmax':\n","\t\tret = tf.nn.softmax(data, axis=-1)\n","\telif method == 'leakyRelu':\n","\t\tret = tf.maximum(leaky*data, data)\n","\telif method == 'twoWayLeakyRelu':\n","\t\ttemMask = tf.to_float(tf.greater(data, 1.0))\n","\t\tret = temMask * (1 + leaky * (data - 1)) + (1 - temMask) * tf.maximum(leaky * data, data)\n","\telif method == '-1relu':\n","\t\tret = tf.maximum(-1.0, data)\n","\telif method == 'relu6':\n","\t\tret = tf.maximum(0.0, tf.minimum(6.0, data))\n","\telse:\n","\t\traise Exception('Error Activation Function')\n","\treturn ret\n","\n","def Activate(data, method, useBN=False):\n","\tglobal leaky\n","\tif useBN:\n","\t\tret = BN(data)\n","\telse:\n","\t\tret = data\n","\tret = ActivateHelp(ret, method)\n","\treturn ret\n","\n","def Regularize(names=None, method='L2'):\n","\tret = 0\n","\tif method == 'L1':\n","\t\tif names != None:\n","\t\t\tfor name in names:\n","\t\t\t\tret += tf.reduce_sum(tf.abs(getParam(name)))\n","\t\telse:\n","\t\t\tfor name in regParams:\n","\t\t\t\tret += tf.reduce_sum(tf.abs(regParams[name]))\n","\telif method == 'L2':\n","\t\tif names != None:\n","\t\t\tfor name in names:\n","\t\t\t\tret += tf.reduce_sum(tf.square(getParam(name)))\n","\t\telse:\n","\t\t\tfor name in regParams:\n","\t\t\t\tret += tf.reduce_sum(tf.square(regParams[name]))\n","\treturn ret\n","\n","def Dropout(data, rate):\n","\tif rate == None:\n","\t\treturn data\n","\telse:\n","\t\treturn tf.nn.dropout(data, rate=rate)"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"xlLh1HkrrLB7"},"source":["## Logger"]},{"cell_type":"code","metadata":{"id":"v-wrKKZfrOyd"},"source":["logmsg = ''\n","timemark = dict()\n","saveDefault = False\n","\n","def log(msg, save=None, 
oneline=False):\n","\tglobal logmsg\n","\tglobal saveDefault\n","\ttime = datetime.datetime.now()\n","\ttem = '%s: %s' % (time, msg)\n","\tif save != None:\n","\t\tif save:\n","\t\t\tlogmsg += tem + '\\n'\n","\telif saveDefault:\n","\t\tlogmsg += tem + '\\n'\n","\tif oneline:\n","\t\tprint(tem, end='\\r')\n","\telse:\n","\t\tprint(tem)\n","\n","def marktime(marker):\n","\tglobal timemark\n","\ttimemark[marker] = datetime.datetime.now()\n","\n","def SpentTime(marker):\n","\tglobal timemark\n","\tif marker not in timemark:\n","\t\tmsg = 'LOGGER ERROR, marker', marker, ' not found'\n","\t\ttem = '%s: %s' % (time, msg)\n","\t\tprint(tem)\n","\t\treturn False\n","\treturn datetime.datetime.now() - timemark[marker]\n","\n","def SpentTooLong(marker, day=0, hour=0, minute=0, second=0):\n","\tglobal timemark\n","\tif marker not in timemark:\n","\t\tmsg = 'LOGGER ERROR, marker', marker, ' not found'\n","\t\ttem = '%s: %s' % (time, msg)\n","\t\tprint(tem)\n","\t\treturn False\n","\treturn datetime.datetime.now() - timemark[marker] >= datetime.timedelta(days=day, hours=hour, minutes=minute, seconds=second)"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"8-W6urrWrPKE"},"source":["## Data Handler"]},{"cell_type":"code","metadata":{"id":"6eoRj_smrTR9"},"source":["# predir = 'Datasets/Tmall/backup/hr_ndcg_click/'\n","# predir = 'Datasets/MultiInt-ML10M/buy/'\n","predir = 'MATN/Datasets/yelp/click/'\n","trnfile = predir + 'trn_'\n","tstfile = predir + 'tst_'\n","# behs = ['pv', 'fav', 'cart', 'buy']\n","# behs = ['neg', 'neutral', 'pos']\n","behs = ['tip', 'neg', 'neutral', 'pos']\n","\n","def helpInit(a, b, c):\n","\tret = [[None] * b for i in range(a)]\n","\tfor i in range(a):\n","\t\tfor j in range(b):\n","\t\t\tret[i][j] = [None] * c\n","\treturn ret\n","\n","def LoadData():\n","\tfor i in range(len(behs)):\n","\t\tbeh = behs[i]\n","\t\tpath = trnfile + beh\n","\t\twith open(path, 'rb') as fs:\n","\t\t\tmat = 
(2**i)*(pickle.load(fs)!=0)\n","\t\ttrnMat = (mat if i==0 else trnMat + mat)\n","\t\t# if i == len(behs)-1:\n","\t\t# \tbuyMat = 1 * (mat != 0)\n","\tbuyMat = 1 * (trnMat != 0)\n","\t# test set\n","\tpath = tstfile + 'int'\n","\twith open(path, 'rb') as fs:\n","\t\ttstInt = np.array(pickle.load(fs))\n","\ttstStat = (tstInt!=None)\n","\ttstUsrs = np.reshape(np.argwhere(tstStat!=False), [-1])\n","\n","\treturn trnMat, tstInt, buyMat, tstUsrs\n","\n","def getmask(low, high, trnMat, tstUsrs, tstInt):\n","\tcnts = np.reshape(np.array(np.sum(trnMat, axis=-1)), [-1])\n","\tlst = list()\n","\tfor usr in tstUsrs:\n","\t\tlst.append((cnts[usr], usr))\n","\tlst.sort(key=lambda x: x[0])\n","\tlength = len(lst)\n","\tl = int(low * length)\n","\tr = int(high * length)\n","\tret = set()\n","\tfor i in range(l, r):\n","\t\tret.add(lst[i][1])\n","\treturn ret\n","\n","def negSamp(tembuy, curlist):\n","\ttemsize = 1000#1000\n","\tnegset = [None] * temsize\n","\tcur = 0\n","\tfor temcur in curlist:\n","\t\tif tembuy[temcur] == 0:\n","\t\t\tnegset[cur] = temcur\n","\t\t\tcur += 1\n","\t\tif cur == temsize:\n","\t\t\tbreak\n","\tnegset = np.array(negset[:cur])\n","\treturn negset\n","\n","def TransMat(mat):\n","\tuser, item = mat.shape\n","\tdata = mat.data\n","\tindices = mat.indices\n","\tindptr = mat.indptr\n","\n","\tnewdata = [None] * len(data)\n","\trowInd = [None] * len(data)\n","\tcolInd = [None] * len(data)\n","\tlength = 0\n","\n","\tfor i in range(user):\n","\t\ttemlocs = indices[indptr[i]: indptr[i+1]]\n","\t\ttemvals = data[indptr[i]: indptr[i+1]]\n","\t\tfor j in range(len(temlocs)):\n","\t\t\trowInd[length] = temlocs[j]\n","\t\t\tcolInd[length] = i\n","\t\t\tnewdata[length] = temvals[j]\n","\t\t\tlength += 1\n","\tif length != len(data):\n","\t\tprint('ERROR IN Trans', length, len(data))\n","\t\texit()\n","\ttpMat = csr_matrix((newdata, (rowInd, colInd)), shape=[item, user])\n","\treturn tpMat\n","\n","def binFind(pred, shoot):\n","\tminn = np.min(pred)\n","\tmaxx = 
np.max(pred)\n","\tl = minn\n","\tr = maxx\n","\twhile True:\n","\t\tmid = (l + r) / 2\n","\t\ttem = (pred - mid) > 0\n","\t\tnum = np.sum(tem)\n","\t\tif num == shoot or np.abs(l - r)<1e-3:\n","\t\t\tarr = tem\n","\t\t\tbreak\n","\t\tif num > shoot:\n","\t\t\tl = mid\n","\t\telse:\n","\t\t\tr = mid\n","\treturn np.reshape(np.argwhere(tem), [-1])[:shoot]"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"UijaFqykrYpR"},"source":["## Params"]},{"cell_type":"code","metadata":{"id":"hvhp5wkNrbbG"},"source":["def parse_args():\n","\tparser = argparse.ArgumentParser(description='Model Params')\n","\tparser.add_argument('--lr', default=2e-4, type=float, help='learning rate')\n","\tparser.add_argument('--batch', default=32, type=int, help='batch size')\n","\tparser.add_argument('--reg', default=1e-2, type=float, help='weight decay regularizer')\n","\t# parser.add_argument('--epoch', default=120, type=int, help='number of epochs')\n","\tparser.add_argument('--epoch', default=12, type=int, help='number of epochs')\n","\tparser.add_argument('--decay', default=0.96, type=float, help='weight decay rate')\n","\tparser.add_argument('--save_path', default='tem', help='file name to save model and training record')\n","\tparser.add_argument('--latdim', default=8, type=int, help='embedding size')\n","\tparser.add_argument('--memosize', default=4, type=int, help='memory size')\n","\tparser.add_argument('--posbat', default=40, type=int, help='batch size of positive sampling')\n","\tparser.add_argument('--negsamp', default=1, type=int, help='rate of negative sampling')\n","\tparser.add_argument('--att_head', default=2, type=int, help='number of attention heads')\n","\tparser.add_argument('--trn_num', default=10000, type=int, help='number of training instances per epoch')\n","\tparser.add_argument('--load_model', default=None, help='model name to load')\n","\tparser.add_argument('--shoot', default=10, type=int, help='K of top k')\n","\treturn 
parser.parse_args(args={})\n"," \n","args = parse_args()\n","# args.user = 805506#147894\n","# args.item = 584050#99037\n","# ML10M\n","# args.user = 67788\n","# args.item = 8704\n","# yelp\n","args.user = 19800\n","args.item = 22734\n","\n","# swap user and item\n","# tem = args.user\n","# args.user = args.item\n","# args.item = tem\n","\n","# args.decay_step = args.trn_num\n","args.decay_step = args.item//args.batch"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"HPwctu5nrf5h"},"source":["## Run"]},{"cell_type":"code","metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"Qgl9jdg3rjk6","executionInfo":{"status":"ok","timestamp":1634114080098,"user_tz":-330,"elapsed":763277,"user":{"displayName":"Sparsh Agarwal","photoUrl":"https://lh3.googleusercontent.com/a/default-user=s64","userId":"13037694610922482904"}},"outputId":"67f20ec1-b10f-47a8-bcee-85494b51dabf"},"source":["class Recommender:\n","\tdef __init__(self, sess, datas, inpDim):\n","\t\tself.inpDim = inpDim\n","\t\tself.sess = sess\n","\t\tself.trnMat, self.tstInt, self.buyMat, self.tstUsrs = datas\n","\t\tself.metrics = dict()\n","\t\t# Per-epoch metric histories, keyed 'Train'/'Test' + name.\n","\t\t# Fix: original read ['Loss', 'preLoss' 'HR', 'NDCG'] -- the missing comma\n","\t\t# concatenated 'preLoss' and 'HR' into 'preLossHR', so makePrint never\n","\t\t# recorded preLoss/HR/NDCG histories for saveHistory.\n","\t\tmets = ['Loss', 'preLoss', 'HR', 'NDCG']\n","\t\tfor met in mets:\n","\t\t\tself.metrics['Train'+met] = list()\n","\t\t\tself.metrics['Test'+met] = list()\n","\n","\tdef makePrint(self, name, ep, reses, save):\n","\t\tret = 'Epoch %d/%d, %s: ' % (ep, args.epoch, name)\n","\t\tfor metric in reses:\n","\t\t\tval = reses[metric]\n","\t\t\tret += '%s = %.4f, ' % (metric, val)\n","\t\t\ttem = name + metric\n","\t\t\tif save and tem in self.metrics:\n","\t\t\t\tself.metrics[tem].append(val)\n","\t\tret = ret[:-2] + ' '\n","\t\treturn ret\n","\n","\tdef run(self):\n","\t\tself.prepareModel()\n","\t\tlog('Model Prepared')\n","\t\tif args.load_model != None:\n","\t\t\tself.loadModel()\n","\t\t\tstloc = len(self.metrics['TrainLoss'])\n","\t\telse:\n","\t\t\tstloc = 0\n","\t\t\tinit = 
tf.global_variables_initializer()\n","\t\t\tself.sess.run(init)\n","\t\t\tlog('Variables Inited')\n","\t\tfor ep in range(stloc, args.epoch):\n","\t\t\ttest = (ep % 3 == 0)\n","\t\t\treses = self.trainEpoch()\n","\t\t\tlog(self.makePrint('Train', ep, reses, test))\n","\t\t\tif test:\n","\t\t\t\treses = self.testEpoch()\n","\t\t\t\tlog(self.makePrint('Test', ep, reses, test))\n","\t\t\tif ep % 5 == 0:\n","\t\t\t\tself.saveHistory()\n","\t\t\tprint()\n","\t\treses = self.testEpoch()\n","\t\tlog(self.makePrint('Test', args.epoch, reses, True))\n","\t\tself.saveHistory()\n","\n","\tdef multiHeadAttention(self, localReps, glbRep, number, numHeads, inpDim):\n","\t\tquery = tf.reshape(tf.tile(tf.reshape(FC(glbRep, inpDim, useBias=True, reg=True), [-1, 1, inpDim]), [1, number, 1]), [-1, numHeads, inpDim//numHeads])\n","\t\ttemLocals = tf.reshape(localReps, [-1, inpDim])\n","\t\tkey = tf.reshape(FC(temLocals, inpDim, useBias=True, reg=True), [-1, numHeads, inpDim//numHeads])\n","\t\tval = tf.reshape(FC(temLocals, inpDim, useBias=True, reg=True), [-1, number, numHeads, inpDim//numHeads])\n","\t\tatt = tf.nn.softmax(2*tf.reshape(tf.reduce_sum(query * key, axis=-1), [-1, number, numHeads, 1]), axis=1)\n","\t\tattRep = tf.reshape(tf.reduce_sum(val * att, axis=1), [-1, inpDim])\n","\t\treturn attRep\n","\n","\tdef selfAttention(self, localReps, number, inpDim):\n","\t\tattReps = [None] * number\n","\t\tstkReps = tf.stack(localReps, axis=1)\n","\t\tfor i in range(number):\n","\t\t\tglbRep = localReps[i]\n","\t\t\ttemAttRep = self.multiHeadAttention(stkReps, glbRep, number=number, numHeads=args.att_head, inpDim=inpDim) + glbRep\n","\t\t\t# fc1 = FC(temAttRep, inpDim, reg=True, useBias=True, activation='relu') + temAttRep\n","\t\t\t# fc2 = FC(fc1, inpDim, reg=True, useBias=True, activation='relu') + fc1\n","\t\t\tattReps[i] = temAttRep#fc2\n","\t\treturn attReps\n","\n","\tdef divide(self, interaction):\n","\t\tret = [None] * self.intTypes\n","\t\tfor i in 
range(self.intTypes):\n","\t\t\tret[i] = tf.to_float(tf.bitwise.bitwise_and(interaction, (2**i)) / (2**i))\n","\t\treturn ret\n","\n","\tdef mine(self, interaction):\n","\t\tactivation = 'relu'\n","\t\tV = defineParam('v', [self.inpDim, args.latdim], reg=True)\n","\t\tdivideLst = self.divide(interaction)\n","\t\tcatlat1 = []\n","\t\tfor dividInp in divideLst:\n","\t\t\tcatlat1.append(dividInp @ V)\n","\t\tcatlat2 = self.selfAttention(catlat1, number=self.intTypes, inpDim=args.latdim)\n","\t\tcatlat3 = list()\n","\t\tself.memoAtt = []\n","\t\tfor i in range(self.intTypes):\n","\t\t\tresCatlat = catlat2[i] + catlat1[i]\n","\t\t\tmemoatt = FC(resCatlat, args.memosize, activation='relu', reg=True, useBias=True)\n","\t\t\tmemoTrans = tf.reshape(FC(memoatt, args.latdim**2, reg=True, name='memoTrans'), [-1, args.latdim, args.latdim])\n","\t\t\tself.memoAtt.append(memoatt)\n","\n","\t\t\ttem = tf.reshape(resCatlat, [-1, 1, args.latdim])\n","\t\t\ttransCatlat = tf.reshape(tem @ memoTrans, [-1, args.latdim])\n","\t\t\tcatlat3.append(transCatlat)\n","\n","\t\tstkCatlat3 = tf.stack(catlat3, axis=1)\n","\n","\t\tweights = defineParam('fuseAttWeight', [1, self.intTypes, 1], reg=True, initializer='zeros')\n","\t\tsftW = tf.nn.softmax(weights*2, axis=1)\n","\t\tfusedLat = tf.reduce_sum(sftW * stkCatlat3, axis=1)\n","\t\tself.memoAtt = tf.stack(self.memoAtt, axis=1)\n","\n","\t\tlat = fusedLat\n","\t\tfor i in range(2):\n","\t\t\tlat = FC(lat, args.latdim, useBias=True, reg=True, activation=activation) + lat\n","\t\treturn lat\n","\n","\tdef prepareModel(self):\n","\t\tself.intTypes = 4\n","\t\tself.interaction = tf.placeholder(dtype=tf.int32, shape=[None, self.inpDim], name='interaction')\n","\t\tself.posLabel = tf.placeholder(dtype=tf.int32, shape=[None, None], name='posLabel')\n","\t\tself.negLabel = tf.placeholder(dtype=tf.int32, shape=[None, None], name='negLabel')\n","\t\tintEmbed = tf.reshape(self.mine(self.interaction), [-1, 1, args.latdim])\n","\t\tself.learnedEmbed = 
tf.reshape(intEmbed, [-1, args.latdim])\n","\n","\t\tW = defineParam('W', [self.inpDim, args.latdim], reg=True)\n","\t\tposEmbeds = tf.transpose(tf.nn.embedding_lookup(W, self.posLabel), [0, 2, 1])\n","\t\tnegEmbeds = tf.transpose(tf.nn.embedding_lookup(W, self.negLabel), [0, 2, 1])\n","\t\tsampnum = tf.shape(self.posLabel)[1]\n","\n","\t\tposPred = tf.reshape(intEmbed @ posEmbeds, [-1, sampnum])\n","\t\tnegPred = tf.reshape(intEmbed @ negEmbeds, [-1, sampnum])\n","\t\tself.posPred = posPred\n","\n","\t\tself.preLoss = tf.reduce_mean(tf.reduce_sum(tf.maximum(0.0, 1.0 - (posPred - negPred)), axis=-1))\n","\t\tself.regLoss = args.reg * Regularize(method='L2')\n","\t\tself.loss = self.preLoss + self.regLoss\n","\n","\t\tglobalStep = tf.Variable(0, trainable=False)\n","\t\tlearningRate = tf.train.exponential_decay(args.lr, globalStep, args.decay_step, args.decay, staircase=True)\n","\t\tself.optimizer = tf.train.AdamOptimizer(learningRate).minimize(self.loss, global_step=globalStep)\n","\n","\tdef trainEpoch(self):\n","\t\ttrnMat = self.trnMat\n","\t\tnum = trnMat.shape[0]\n","\t\ttrnSfIds = np.random.permutation(num)[:args.trn_num]\n","\t\ttstSfIds = self.tstUsrs\n","\t\tsfIds = np.random.permutation(np.concatenate((trnSfIds, tstSfIds)))\n","\t\t# sfIds = trnSfIds\n","\t\tepochLoss, epochPreLoss = [0] * 2\n","\t\tnum = len(sfIds)\n","\t\tsteps = int(np.ceil(num / args.batch))\n","\n","\t\tfor i in range(steps):\n","\t\t\tcurLst = list(np.random.permutation(self.inpDim))\n","\t\t\tst = i * args.batch\n","\t\t\ted = min((i+1) * args.batch, num)\n","\t\t\tbatchIds = sfIds[st: ed]\n","\n","\t\t\ttemTrn = trnMat[batchIds].toarray()\n","\t\t\ttembuy = self.buyMat[batchIds].toarray()\n","\n","\t\t\ttemPos = [[None]*(args.posbat*args.negsamp) for i in range(len(batchIds))]\n","\t\t\ttemNeg = [[None]*(args.posbat*args.negsamp) for i in range(len(batchIds))]\n","\t\t\tfor ii in range(len(batchIds)):\n","\t\t\t\trow = batchIds[ii]\n","\t\t\t\tposset = 
np.reshape(np.argwhere(tembuy[ii]!=0), [-1])\n","\t\t\t\tnegset = negSamp(tembuy[ii], curLst)\n","\t\t\t\tidx = 0\n","\t\t\t\t# if len(posset) == 0:\n","\t\t\t\t# \tposset = np.random.choice(list(range(args.item)), args.posbat)\n","\t\t\t\tfor j in np.random.choice(posset, args.posbat):\n","\t\t\t\t\tfor k in np.random.choice(negset, args.negsamp):\n","\t\t\t\t\t\ttemPos[ii][idx] = j\n","\t\t\t\t\t\ttemNeg[ii][idx] = k\n","\t\t\t\t\t\tidx += 1\n","\t\t\ttarget = [self.optimizer, self.preLoss, self.regLoss, self.loss]\n","\t\t\tres = self.sess.run(target, feed_dict={self.interaction: (temTrn).astype('int32'),\n","\t\t\t\tself.posLabel: temPos, self.negLabel: temNeg\n","\t\t\t\t}, options=config_pb2.RunOptions(report_tensor_allocations_upon_oom=True))\n","\t\t\tpreLoss, regLoss, loss = res[1:]\n","\n","\t\t\tepochLoss += loss\n","\t\t\tepochPreLoss += preLoss\n","\t\t\tlog('Step %d/%d: loss = %.2f, regLoss = %.2f ' %\\\n","\t\t\t\t(i, steps, loss, regLoss), save=False, oneline=True)\n","\t\tret = dict()\n","\t\tret['Loss'] = epochLoss / steps\n","\t\tret['preLoss'] = epochPreLoss / steps\n","\t\treturn ret\n","\n","\tdef testEpoch(self):\n","\t\ttrnMat = self.trnMat\n","\t\ttstInt = self.tstInt\n","\t\tepochHit, epochNdcg = [0] * 2\n","\t\tids = self.tstUsrs\n","\t\tnum = len(ids)\n","\t\ttestbatch = args.batch\n","\t\tsteps = int(np.ceil(num / testbatch))\n","\t\tfor i in range(steps):\n","\t\t\tst = i * testbatch\n","\t\t\ted = min((i+1) * testbatch, num)\n","\t\t\tbatchIds = ids[st:ed]\n","\n","\t\t\ttemTrn = trnMat[batchIds].toarray()\n","\t\t\ttemTst = tstInt[batchIds]\n","\t\t\ttembuy = self.buyMat[batchIds].toarray()\n","\n","\t\t\t# get test locations\n","\t\t\ttstLocs = [None] * len(batchIds)\n","\t\t\tfor j in range(len(batchIds)):\n","\t\t\t\tnegset = np.reshape(np.argwhere(tembuy[j]==0), [-1])\n","\t\t\t\trdnNegSet = np.random.permutation(negset)\n","\t\t\t\ttstLocs[j] = list(rdnNegSet[:99])\n","\t\t\t\ttem = ([rdnNegSet[99]] if temTst[j] in tstLocs[j] 
else [temTst[j]])\n","\t\t\t\ttstLocs[j] = tstLocs[j] + tem\n","\n","\t\t\tpreds = self.sess.run(self.posPred, feed_dict={self.interaction:temTrn.astype('int32'), self.posLabel: tstLocs}, options=config_pb2.RunOptions(report_tensor_allocations_upon_oom=True))\n","\n","\t\t\thit, ndcg = self.calcRes(preds, temTst, tstLocs)\n","\t\t\tepochHit += hit\n","\t\t\tepochNdcg += ndcg\n","\t\t\tlog('Step %d/%d: hit = %d, ndcg = %d          ' %\\\n","\t\t\t\t(i, steps, hit, ndcg), save=False, oneline=True)\n","\t\tret = dict()\n","\t\tret['HR'] = epochHit / num\n","\t\tret['NDCG'] = epochNdcg / num\n","\t\treturn ret\n","\n","\tdef calcRes(self, preds, temTst, tstLocs):\n","\t\thit = 0\n","\t\tndcg = 0\n","\t\tfor j in range(preds.shape[0]):\n","\t\t\tpredvals = list(zip(preds[j], tstLocs[j]))\n","\t\t\tpredvals.sort(key=lambda x: x[0], reverse=True)\n","\t\t\tshoot = list(map(lambda x: x[1], predvals[:args.shoot]))\n","\t\t\tif temTst[j] in shoot:\n","\t\t\t\thit += 1\n","\t\t\t\tndcg += np.reciprocal(np.log2(shoot.index(temTst[j])+2))\n","\t\treturn hit, ndcg\n","\t\n","\tdef saveHistory(self):\n","\t\tif args.epoch == 0:\n","\t\t\treturn\n","\t\twith open('History/' + args.save_path + '.his', 'wb') as fs:\n","\t\t\tpickle.dump(self.metrics, fs)\n","\n","\t\tsaver = tf.train.Saver()\n","\t\tsaver.save(self.sess, 'Models/' + args.save_path)\n","\t\tlog('Model Saved: %s' % args.save_path)\n","\n","\tdef loadModel(self):\n","\t\tsaver = tf.train.Saver()\n","\t\t# Fix: restore into this instance's session. The original passed the\n","\t\t# outer-scope 'sess', which only worked by accident of notebook globals\n","\t\t# and breaks if Recommender is used outside the __main__ with-block.\n","\t\t# Now consistent with saveHistory, which uses self.sess.\n","\t\tsaver.restore(self.sess, 'Models/' + args.load_model)\n","\t\twith open('History/' + args.load_model + '.his', 'rb') as fs:\n","\t\t\tself.metrics = pickle.load(fs)\n","\t\tlog('Model Loaded')\t\n","\n","if __name__ == '__main__':\n","\tsaveDefault = True\n","\tconfig = tf.ConfigProto()\n","\tconfig.gpu_options.allow_growth = True\n","\n","\tlog('Start')\n","\tdatas = LoadData()\n","\tlog('Load Data')\n","\n","\twith tf.Session(config=config) as sess:\n","\t\trecom = Recommender(sess, datas, 
args.item)\n","\t\trecom.run()"],"execution_count":null,"outputs":[{"output_type":"stream","name":"stdout","text":["2021-10-13 08:21:57.938067: Start\n","2021-10-13 08:21:58.005996: Load Data\n","WARNING:tensorflow:From :71: to_float (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n","Instructions for updating:\n","Use `tf.cast` instead.\n","WARNING:tensorflow:From /tensorflow-1.15.2/python3.7/tensorflow_core/python/ops/math_grad.py:1424: where (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version.\n","Instructions for updating:\n","Use tf.where in 2.0, which has the same broadcast rule as np.where\n","2021-10-13 08:22:00.422211: Model Prepared\n","2021-10-13 08:22:00.979723: Variables Inited\n","2021-10-13 08:23:00.035440: Epoch 0/12, Train: Loss = 37.1899, preLoss = 35.8160 \n","2021-10-13 08:23:12.182542: Epoch 0/12, Test: HR = 0.3306, NDCG = 0.1771 \n","2021-10-13 08:23:12.621136: Model Saved: tem\n","\n","2021-10-13 08:24:11.020664: Epoch 1/12, Train: Loss = 25.9994, preLoss = 24.2789 \n","\n","2021-10-13 08:25:09.313385: Epoch 2/12, Train: Loss = 20.5651, preLoss = 18.6902 \n","\n","2021-10-13 08:26:07.765660: Epoch 3/12, Train: Loss = 18.4499, preLoss = 16.5704 \n","2021-10-13 08:26:19.691564: Epoch 3/12, Test: HR = 0.4413, NDCG = 0.2375 \n","\n","2021-10-13 08:27:17.658203: Epoch 4/12, Train: Loss = 17.4562, preLoss = 15.6297 \n","\n","2021-10-13 08:28:15.284263: Epoch 5/12, Train: Loss = 16.9354, preLoss = 15.2013 \n","2021-10-13 08:28:15.721673: Model Saved: tem\n","\n","2021-10-13 08:29:12.905669: Epoch 6/12, Train: Loss = 16.6569, preLoss = 15.0294 \n","2021-10-13 08:29:24.723370: Epoch 6/12, Test: HR = 0.4530, NDCG = 0.2438 \n","\n","2021-10-13 08:30:22.763829: Epoch 7/12, Train: Loss = 16.3886, preLoss = 14.8580 \n","\n","2021-10-13 08:31:20.847308: Epoch 8/12, Train: Loss = 16.2173, preLoss = 14.7782 \n","\n","2021-10-13 08:32:18.827044: Epoch 9/12, Train: Loss = 
16.0591, preLoss = 14.7042 \n","2021-10-13 08:32:30.636556: Epoch 9/12, Test: HR = 0.4546, NDCG = 0.2477 \n","\n","2021-10-13 08:33:28.724700: Epoch 10/12, Train: Loss = 15.9482, preLoss = 14.6634 \n","2021-10-13 08:33:29.182999: Model Saved: tem\n","\n","2021-10-13 08:34:27.757194: Epoch 11/12, Train: Loss = 15.6551, preLoss = 14.4224 \n","\n","2021-10-13 08:34:39.632292: Epoch 12/12, Test: HR = 0.4688, NDCG = 0.2555 \n","2021-10-13 08:34:40.116667: Model Saved: tem\n"]}]}]}