{"nbformat":4,"nbformat_minor":0,"metadata":{"colab":{"name":"2022-01-22-graph-sdne.ipynb","provenance":[{"file_id":"https://github.com/recohut/nbs/blob/main/raw/T189677%20%7C%20Graph%20embeddings%20using%20SDNE.ipynb","timestamp":1644661685358}],"collapsed_sections":[],"toc_visible":true,"authorship_tag":"ABX9TyO1c8BpIDq/+NwLxICqEus/"},"kernelspec":{"name":"python3","display_name":"Python 3"},"language_info":{"name":"python"}},"cells":[{"cell_type":"markdown","metadata":{"id":"GDRzELU-ZruG"},"source":["# Graph embeddings using SDNE"]},{"cell_type":"code","metadata":{"id":"DIZ33_S_Bxrz"},"source":["!pip install git+https://github.com/palash1992/GEM.git\n","!pip install -U Ipython"],"execution_count":null,"outputs":[]},{"cell_type":"code","metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"hpvRCtZPQR6M","executionInfo":{"status":"ok","timestamp":1627981550057,"user_tz":-330,"elapsed":771,"user":{"displayName":"Sparsh Agarwal","photoUrl":"","userId":"13037694610922482904"}},"outputId":"64318234-214a-4e88-a0bb-2e353520c4cc"},"source":["%tensorflow_version 1.x"],"execution_count":null,"outputs":[{"output_type":"stream","text":["TensorFlow 1.x selected.\n"],"name":"stdout"}]},{"cell_type":"code","metadata":{"id":"q_TCzoMJQSbG"},"source":["from gem.embedding.sdne import SDNE\n","from matplotlib import pyplot as plt\n","from IPython.display import Code\n","import networkx as nx\n","import inspect"],"execution_count":null,"outputs":[]},{"cell_type":"code","metadata":{"colab":{"base_uri":"https://localhost:8080/","height":1000},"id":"CgRJ2RVuQ1Xw","executionInfo":{"status":"ok","timestamp":1627981590252,"user_tz":-330,"elapsed":21,"user":{"displayName":"Sparsh Agarwal","photoUrl":"","userId":"13037694610922482904"}},"outputId":"98811583-f2c3-4af8-d602-04d6d891ba84"},"source":["Code(inspect.getsource(SDNE), language='python')"],"execution_count":null,"outputs":[{"output_type":"execute_result","data":{"text/latex":"\\begin{Verbatim}[commandchars=\\\\\\{\\}]\n\\PY{k}{class} \\PY{n+nc}{SDNE}\\PY{p}{(}\\PY{n}{StaticGraphEmbedding}\\PY{p}{)}\\PY{p}{:}\n\n \\PY{k}{def} \\PY{n+nf+fm}{\\PYZus{}\\PYZus{}init\\PYZus{}\\PYZus{}}\\PY{p}{(}\\PY{n+nb+bp}{self}\\PY{p}{,} \\PY{o}{*}\\PY{n}{hyper\\PYZus{}dict}\\PY{p}{,} \\PY{o}{*}\\PY{o}{*}\\PY{n}{kwargs}\\PY{p}{)}\\PY{p}{:}\n \\PY{l+s+sd}{\\PYZsq{}\\PYZsq{}\\PYZsq{} Initialize the SDNE class}\n\n\\PY{l+s+sd}{ Args:}\n\\PY{l+s+sd}{ d: dimension of the embedding}\n\\PY{l+s+sd}{ beta: penalty parameter in matrix B of 2nd order objective}\n\\PY{l+s+sd}{ alpha: weighing hyperparameter for 1st order objective}\n\\PY{l+s+sd}{ nu1: L1\\PYZhy{}reg hyperparameter}\n\\PY{l+s+sd}{ nu2: L2\\PYZhy{}reg hyperparameter}\n\\PY{l+s+sd}{ K: number of hidden layers in encoder/decoder}\n\\PY{l+s+sd}{ n\\PYZus{}units: vector of length K\\PYZhy{}1 containing \\PYZsh{}units in hidden layers}\n\\PY{l+s+sd}{ of encoder/decoder, not including the units in the}\n\\PY{l+s+sd}{ embedding layer}\n\\PY{l+s+sd}{ rho: bounding ratio for number of units in consecutive layers (\\PYZlt{} 1)}\n\\PY{l+s+sd}{ n\\PYZus{}iter: number of sgd iterations for first embedding (const)}\n\\PY{l+s+sd}{ xeta: sgd step size parameter}\n\\PY{l+s+sd}{ n\\PYZus{}batch: minibatch size for SGD}\n\\PY{l+s+sd}{ modelfile: Files containing previous encoder and decoder models}\n\\PY{l+s+sd}{ weightfile: Files containing previous encoder and decoder weights}\n\\PY{l+s+sd}{ \\PYZsq{}\\PYZsq{}\\PYZsq{}}\n \\PY{n}{hyper\\PYZus{}params} \\PY{o}{=} \\PY{p}{\\PYZob{}}\n 
\\PY{l+s+s1}{\\PYZsq{}}\\PY{l+s+s1}{method\\PYZus{}name}\\PY{l+s+s1}{\\PYZsq{}}\\PY{p}{:} \\PY{l+s+s1}{\\PYZsq{}}\\PY{l+s+s1}{sdne}\\PY{l+s+s1}{\\PYZsq{}}\\PY{p}{,}\n \\PY{l+s+s1}{\\PYZsq{}}\\PY{l+s+s1}{actfn}\\PY{l+s+s1}{\\PYZsq{}}\\PY{p}{:} \\PY{l+s+s1}{\\PYZsq{}}\\PY{l+s+s1}{relu}\\PY{l+s+s1}{\\PYZsq{}}\\PY{p}{,}\n \\PY{l+s+s1}{\\PYZsq{}}\\PY{l+s+s1}{modelfile}\\PY{l+s+s1}{\\PYZsq{}}\\PY{p}{:} \\PY{k+kc}{None}\\PY{p}{,}\n \\PY{l+s+s1}{\\PYZsq{}}\\PY{l+s+s1}{weightfile}\\PY{l+s+s1}{\\PYZsq{}}\\PY{p}{:} \\PY{k+kc}{None}\\PY{p}{,}\n \\PY{l+s+s1}{\\PYZsq{}}\\PY{l+s+s1}{savefilesuffix}\\PY{l+s+s1}{\\PYZsq{}}\\PY{p}{:} \\PY{k+kc}{None}\n\n \\PY{p}{\\PYZcb{}}\n \\PY{n}{hyper\\PYZus{}params}\\PY{o}{.}\\PY{n}{update}\\PY{p}{(}\\PY{n}{kwargs}\\PY{p}{)}\n \\PY{k}{for} \\PY{n}{key} \\PY{o+ow}{in} \\PY{n}{hyper\\PYZus{}params}\\PY{o}{.}\\PY{n}{keys}\\PY{p}{(}\\PY{p}{)}\\PY{p}{:}\n \\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n+nf+fm}{\\PYZus{}\\PYZus{}setattr\\PYZus{}\\PYZus{}}\\PY{p}{(}\\PY{l+s+s1}{\\PYZsq{}}\\PY{l+s+s1}{\\PYZus{}}\\PY{l+s+si}{\\PYZpc{}s}\\PY{l+s+s1}{\\PYZsq{}} \\PY{o}{\\PYZpc{}} \\PY{n}{key}\\PY{p}{,} \\PY{n}{hyper\\PYZus{}params}\\PY{p}{[}\\PY{n}{key}\\PY{p}{]}\\PY{p}{)}\n \\PY{k}{for} \\PY{n}{dictionary} \\PY{o+ow}{in} \\PY{n}{hyper\\PYZus{}dict}\\PY{p}{:}\n \\PY{k}{for} \\PY{n}{key} \\PY{o+ow}{in} \\PY{n}{dictionary}\\PY{p}{:}\n \\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n+nf+fm}{\\PYZus{}\\PYZus{}setattr\\PYZus{}\\PYZus{}}\\PY{p}{(}\\PY{l+s+s1}{\\PYZsq{}}\\PY{l+s+s1}{\\PYZus{}}\\PY{l+s+si}{\\PYZpc{}s}\\PY{l+s+s1}{\\PYZsq{}} \\PY{o}{\\PYZpc{}} \\PY{n}{key}\\PY{p}{,} \\PY{n}{dictionary}\\PY{p}{[}\\PY{n}{key}\\PY{p}{]}\\PY{p}{)}\n\n \\PY{k}{def} \\PY{n+nf}{get\\PYZus{}method\\PYZus{}name}\\PY{p}{(}\\PY{n+nb+bp}{self}\\PY{p}{)}\\PY{p}{:}\n \\PY{k}{return} \\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}method\\PYZus{}name}\n\n \\PY{k}{def} \\PY{n+nf}{get\\PYZus{}method\\PYZus{}summary}\\PY{p}{(}\\PY{n+nb+bp}{self}\\PY{p}{)}\\PY{p}{:}\n \\PY{k}{return} \\PY{l+s+s1}{\\PYZsq{}}\\PY{l+s+si}{\\PYZpc{}s}\\PY{l+s+s1}{\\PYZus{}}\\PY{l+s+si}{\\PYZpc{}d}\\PY{l+s+s1}{\\PYZsq{}} \\PY{o}{\\PYZpc{}} \\PY{p}{(}\\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}method\\PYZus{}name}\\PY{p}{,} \\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}d}\\PY{p}{)}\n\n \\PY{k}{def} \\PY{n+nf}{learn\\PYZus{}embedding}\\PY{p}{(}\\PY{n+nb+bp}{self}\\PY{p}{,} \\PY{n}{graph}\\PY{o}{=}\\PY{k+kc}{None}\\PY{p}{,} \\PY{n}{edge\\PYZus{}f}\\PY{o}{=}\\PY{k+kc}{None}\\PY{p}{,}\n \\PY{n}{is\\PYZus{}weighted}\\PY{o}{=}\\PY{k+kc}{False}\\PY{p}{,} \\PY{n}{no\\PYZus{}python}\\PY{o}{=}\\PY{k+kc}{False}\\PY{p}{)}\\PY{p}{:}\n \\PY{k}{if} \\PY{o+ow}{not} \\PY{n}{graph} \\PY{o+ow}{and} \\PY{o+ow}{not} \\PY{n}{edge\\PYZus{}f}\\PY{p}{:}\n \\PY{k}{raise} \\PY{n+ne}{Exception}\\PY{p}{(}\\PY{l+s+s1}{\\PYZsq{}}\\PY{l+s+s1}{graph/edge\\PYZus{}f needed}\\PY{l+s+s1}{\\PYZsq{}}\\PY{p}{)}\n \\PY{k}{if} \\PY{o+ow}{not} \\PY{n}{graph}\\PY{p}{:}\n \\PY{n}{graph} \\PY{o}{=} \\PY{n}{graph\\PYZus{}util}\\PY{o}{.}\\PY{n}{loadGraphFromEdgeListTxt}\\PY{p}{(}\\PY{n}{edge\\PYZus{}f}\\PY{p}{)}\n \\PY{n}{S} \\PY{o}{=} \\PY{n}{nx}\\PY{o}{.}\\PY{n}{to\\PYZus{}scipy\\PYZus{}sparse\\PYZus{}matrix}\\PY{p}{(}\\PY{n}{graph}\\PY{p}{)}\n \\PY{n}{t1} \\PY{o}{=} \\PY{n}{time}\\PY{p}{(}\\PY{p}{)}\n \\PY{n}{S} \\PY{o}{=} \\PY{p}{(}\\PY{n}{S} \\PY{o}{+} \\PY{n}{S}\\PY{o}{.}\\PY{n}{T}\\PY{p}{)} \\PY{o}{/} \\PY{l+m+mi}{2}\n \\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}node\\PYZus{}num} \\PY{o}{=} \\PY{n+nb}{len}\\PY{p}{(}\\PY{n}{graph}\\PY{o}{.}\\PY{n}{nodes}\\PY{p}{)}\n\n \\PY{c+c1}{\\PYZsh{} 
Generate encoder, decoder and autoencoder}\n \\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}num\\PYZus{}iter} \\PY{o}{=} \\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}n\\PYZus{}iter}\n \\PY{c+c1}{\\PYZsh{} If cannot use previous step information, initialize new models}\n \\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}encoder} \\PY{o}{=} \\PY{n}{get\\PYZus{}encoder}\\PY{p}{(}\\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}node\\PYZus{}num}\\PY{p}{,} \\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}d}\\PY{p}{,}\n \\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}K}\\PY{p}{,} \\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}n\\PYZus{}units}\\PY{p}{,}\n \\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}nu1}\\PY{p}{,} \\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}nu2}\\PY{p}{,}\n \\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}actfn}\\PY{p}{)}\n \\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}decoder} \\PY{o}{=} \\PY{n}{get\\PYZus{}decoder}\\PY{p}{(}\\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}node\\PYZus{}num}\\PY{p}{,} \\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}d}\\PY{p}{,}\n \\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}K}\\PY{p}{,} \\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}n\\PYZus{}units}\\PY{p}{,}\n \\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}nu1}\\PY{p}{,} \\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}nu2}\\PY{p}{,}\n \\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}actfn}\\PY{p}{)}\n \\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}autoencoder} \\PY{o}{=} \\PY{n}{get\\PYZus{}autoencoder}\\PY{p}{(}\\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}encoder}\\PY{p}{,} \\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}decoder}\\PY{p}{)}\n\n \\PY{c+c1}{\\PYZsh{} Initialize self.\\PYZus{}model}\n \\PY{c+c1}{\\PYZsh{} Input}\n \\PY{n}{x\\PYZus{}in} \\PY{o}{=} \\PY{n}{Input}\\PY{p}{(}\\PY{n}{shape}\\PY{o}{=}\\PY{p}{(}\\PY{l+m+mi}{2} \\PY{o}{*} \\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}node\\PYZus{}num}\\PY{p}{,}\\PY{p}{)}\\PY{p}{,} \\PY{n}{name}\\PY{o}{=}\\PY{l+s+s1}{\\PYZsq{}}\\PY{l+s+s1}{x\\PYZus{}in}\\PY{l+s+s1}{\\PYZsq{}}\\PY{p}{)}\n \\PY{n}{x1} \\PY{o}{=} \\PY{n}{Lambda}\\PY{p}{(}\n \\PY{k}{lambda} \\PY{n}{x}\\PY{p}{:} \\PY{n}{x}\\PY{p}{[}\\PY{p}{:}\\PY{p}{,} \\PY{l+m+mi}{0}\\PY{p}{:}\\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}node\\PYZus{}num}\\PY{p}{]}\\PY{p}{,}\n \\PY{n}{output\\PYZus{}shape}\\PY{o}{=}\\PY{p}{(}\\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}node\\PYZus{}num}\\PY{p}{,}\\PY{p}{)}\n \\PY{p}{)}\\PY{p}{(}\\PY{n}{x\\PYZus{}in}\\PY{p}{)}\n \\PY{n}{x2} \\PY{o}{=} \\PY{n}{Lambda}\\PY{p}{(}\n \\PY{k}{lambda} \\PY{n}{x}\\PY{p}{:} \\PY{n}{x}\\PY{p}{[}\\PY{p}{:}\\PY{p}{,} \\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}node\\PYZus{}num}\\PY{p}{:}\\PY{l+m+mi}{2} \\PY{o}{*} \\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}node\\PYZus{}num}\\PY{p}{]}\\PY{p}{,}\n \\PY{n}{output\\PYZus{}shape}\\PY{o}{=}\\PY{p}{(}\\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}node\\PYZus{}num}\\PY{p}{,}\\PY{p}{)}\n \\PY{p}{)}\\PY{p}{(}\\PY{n}{x\\PYZus{}in}\\PY{p}{)}\n \\PY{c+c1}{\\PYZsh{} Process inputs}\n \\PY{p}{[}\\PY{n}{x\\PYZus{}hat1}\\PY{p}{,} \\PY{n}{y1}\\PY{p}{]} \\PY{o}{=} \\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}autoencoder}\\PY{p}{(}\\PY{n}{x1}\\PY{p}{)}\n \\PY{p}{[}\\PY{n}{x\\PYZus{}hat2}\\PY{p}{,} \\PY{n}{y2}\\PY{p}{]} \\PY{o}{=} \\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}autoencoder}\\PY{p}{(}\\PY{n}{x2}\\PY{p}{)}\n \\PY{c+c1}{\\PYZsh{} Outputs}\n \\PY{n}{x\\PYZus{}diff1} \\PY{o}{=} \\PY{n}{merge}\\PY{p}{(}\\PY{p}{[}\\PY{n}{x\\PYZus{}hat1}\\PY{p}{,} \\PY{n}{x1}\\PY{p}{]}\\PY{p}{,}\n 
\\PY{n}{mode}\\PY{o}{=}\\PY{k}{lambda} \\PY{n}{ab}\\PY{p}{:} \\PY{n}{ab}\\PY{p}{[}\\PY{l+m+mi}{0}\\PY{p}{]} \\PY{o}{\\PYZhy{}} \\PY{n}{ab}\\PY{p}{[}\\PY{l+m+mi}{1}\\PY{p}{]}\\PY{p}{,}\n \\PY{n}{output\\PYZus{}shape}\\PY{o}{=}\\PY{k}{lambda} \\PY{n}{L}\\PY{p}{:} \\PY{n}{L}\\PY{p}{[}\\PY{l+m+mi}{1}\\PY{p}{]}\\PY{p}{)}\n \\PY{n}{x\\PYZus{}diff2} \\PY{o}{=} \\PY{n}{merge}\\PY{p}{(}\\PY{p}{[}\\PY{n}{x\\PYZus{}hat2}\\PY{p}{,} \\PY{n}{x2}\\PY{p}{]}\\PY{p}{,}\n \\PY{n}{mode}\\PY{o}{=}\\PY{k}{lambda} \\PY{n}{ab}\\PY{p}{:} \\PY{n}{ab}\\PY{p}{[}\\PY{l+m+mi}{0}\\PY{p}{]} \\PY{o}{\\PYZhy{}} \\PY{n}{ab}\\PY{p}{[}\\PY{l+m+mi}{1}\\PY{p}{]}\\PY{p}{,}\n \\PY{n}{output\\PYZus{}shape}\\PY{o}{=}\\PY{k}{lambda} \\PY{n}{L}\\PY{p}{:} \\PY{n}{L}\\PY{p}{[}\\PY{l+m+mi}{1}\\PY{p}{]}\\PY{p}{)}\n \\PY{n}{y\\PYZus{}diff} \\PY{o}{=} \\PY{n}{merge}\\PY{p}{(}\\PY{p}{[}\\PY{n}{y2}\\PY{p}{,} \\PY{n}{y1}\\PY{p}{]}\\PY{p}{,}\n \\PY{n}{mode}\\PY{o}{=}\\PY{k}{lambda} \\PY{n}{ab}\\PY{p}{:} \\PY{n}{ab}\\PY{p}{[}\\PY{l+m+mi}{0}\\PY{p}{]} \\PY{o}{\\PYZhy{}} \\PY{n}{ab}\\PY{p}{[}\\PY{l+m+mi}{1}\\PY{p}{]}\\PY{p}{,}\n \\PY{n}{output\\PYZus{}shape}\\PY{o}{=}\\PY{k}{lambda} \\PY{n}{L}\\PY{p}{:} \\PY{n}{L}\\PY{p}{[}\\PY{l+m+mi}{1}\\PY{p}{]}\\PY{p}{)}\n\n \\PY{c+c1}{\\PYZsh{} Objectives}\n \\PY{k}{def} \\PY{n+nf}{weighted\\PYZus{}mse\\PYZus{}x}\\PY{p}{(}\\PY{n}{y\\PYZus{}true}\\PY{p}{,} \\PY{n}{y\\PYZus{}pred}\\PY{p}{)}\\PY{p}{:}\n \\PY{l+s+sd}{\\PYZsq{}\\PYZsq{}\\PYZsq{} Hack: This fn doesn\\PYZsq{}t accept additional arguments.}\n\\PY{l+s+sd}{ We use y\\PYZus{}true to pass them.}\n\\PY{l+s+sd}{ y\\PYZus{}pred: Contains x\\PYZus{}hat \\PYZhy{} x}\n\\PY{l+s+sd}{ y\\PYZus{}true: Contains [b, deg]}\n\\PY{l+s+sd}{ \\PYZsq{}\\PYZsq{}\\PYZsq{}}\n \\PY{k}{return} \\PY{n}{KBack}\\PY{o}{.}\\PY{n}{sum}\\PY{p}{(}\n \\PY{n}{KBack}\\PY{o}{.}\\PY{n}{square}\\PY{p}{(}\\PY{n}{y\\PYZus{}pred} \\PY{o}{*} \\PY{n}{y\\PYZus{}true}\\PY{p}{[}\\PY{p}{:}\\PY{p}{,} \\PY{l+m+mi}{0}\\PY{p}{:}\\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}node\\PYZus{}num}\\PY{p}{]}\\PY{p}{)}\\PY{p}{,}\n \\PY{n}{axis}\\PY{o}{=}\\PY{o}{\\PYZhy{}}\\PY{l+m+mi}{1}\\PY{p}{)} \\PY{o}{/} \\PY{n}{y\\PYZus{}true}\\PY{p}{[}\\PY{p}{:}\\PY{p}{,} \\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}node\\PYZus{}num}\\PY{p}{]}\n\n \\PY{k}{def} \\PY{n+nf}{weighted\\PYZus{}mse\\PYZus{}y}\\PY{p}{(}\\PY{n}{y\\PYZus{}true}\\PY{p}{,} \\PY{n}{y\\PYZus{}pred}\\PY{p}{)}\\PY{p}{:}\n \\PY{l+s+sd}{\\PYZsq{}\\PYZsq{}\\PYZsq{} Hack: This fn doesn\\PYZsq{}t accept additional arguments.}\n\\PY{l+s+sd}{ We use y\\PYZus{}true to pass them.}\n\\PY{l+s+sd}{ y\\PYZus{}pred: Contains y2 \\PYZhy{} y1}\n\\PY{l+s+sd}{ y\\PYZus{}true: Contains s12}\n\\PY{l+s+sd}{ \\PYZsq{}\\PYZsq{}\\PYZsq{}}\n \\PY{n}{min\\PYZus{}batch\\PYZus{}size} \\PY{o}{=} \\PY{n}{KBack}\\PY{o}{.}\\PY{n}{shape}\\PY{p}{(}\\PY{n}{y\\PYZus{}true}\\PY{p}{)}\\PY{p}{[}\\PY{l+m+mi}{0}\\PY{p}{]}\n \\PY{k}{return} \\PY{n}{KBack}\\PY{o}{.}\\PY{n}{reshape}\\PY{p}{(}\n \\PY{n}{KBack}\\PY{o}{.}\\PY{n}{sum}\\PY{p}{(}\\PY{n}{KBack}\\PY{o}{.}\\PY{n}{square}\\PY{p}{(}\\PY{n}{y\\PYZus{}pred}\\PY{p}{)}\\PY{p}{,} \\PY{n}{axis}\\PY{o}{=}\\PY{o}{\\PYZhy{}}\\PY{l+m+mi}{1}\\PY{p}{)}\\PY{p}{,}\n \\PY{p}{[}\\PY{n}{min\\PYZus{}batch\\PYZus{}size}\\PY{p}{,} \\PY{l+m+mi}{1}\\PY{p}{]}\n \\PY{p}{)} \\PY{o}{*} \\PY{n}{y\\PYZus{}true}\n\n \\PY{c+c1}{\\PYZsh{} Model}\n \\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}model} \\PY{o}{=} \\PY{n}{Model}\\PY{p}{(}\\PY{n+nb}{input}\\PY{o}{=}\\PY{n}{x\\PYZus{}in}\\PY{p}{,} \\PY{n}{output}\\PY{o}{=}\\PY{p}{[}\\PY{n}{x\\PYZus{}diff1}\\PY{p}{,} 
\\PY{n}{x\\PYZus{}diff2}\\PY{p}{,} \\PY{n}{y\\PYZus{}diff}\\PY{p}{]}\\PY{p}{)}\n \\PY{n}{sgd} \\PY{o}{=} \\PY{n}{SGD}\\PY{p}{(}\\PY{n}{lr}\\PY{o}{=}\\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}xeta}\\PY{p}{,} \\PY{n}{decay}\\PY{o}{=}\\PY{l+m+mf}{1e\\PYZhy{}5}\\PY{p}{,} \\PY{n}{momentum}\\PY{o}{=}\\PY{l+m+mf}{0.99}\\PY{p}{,} \\PY{n}{nesterov}\\PY{o}{=}\\PY{k+kc}{True}\\PY{p}{)}\n \\PY{c+c1}{\\PYZsh{} adam = Adam(lr=self.\\PYZus{}xeta, beta\\PYZus{}1=0.9, beta\\PYZus{}2=0.999, epsilon=1e\\PYZhy{}08)}\n \\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}model}\\PY{o}{.}\\PY{n}{compile}\\PY{p}{(}\n \\PY{n}{optimizer}\\PY{o}{=}\\PY{n}{sgd}\\PY{p}{,}\n \\PY{n}{loss}\\PY{o}{=}\\PY{p}{[}\\PY{n}{weighted\\PYZus{}mse\\PYZus{}x}\\PY{p}{,} \\PY{n}{weighted\\PYZus{}mse\\PYZus{}x}\\PY{p}{,} \\PY{n}{weighted\\PYZus{}mse\\PYZus{}y}\\PY{p}{]}\\PY{p}{,}\n \\PY{n}{loss\\PYZus{}weights}\\PY{o}{=}\\PY{p}{[}\\PY{l+m+mi}{1}\\PY{p}{,} \\PY{l+m+mi}{1}\\PY{p}{,} \\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}alpha}\\PY{p}{]}\n \\PY{p}{)}\n\n \\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}model}\\PY{o}{.}\\PY{n}{fit\\PYZus{}generator}\\PY{p}{(}\n \\PY{n}{generator}\\PY{o}{=}\\PY{n}{batch\\PYZus{}generator\\PYZus{}sdne}\\PY{p}{(}\\PY{n}{S}\\PY{p}{,} \\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}beta}\\PY{p}{,} \\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}n\\PYZus{}batch}\\PY{p}{,} \\PY{k+kc}{True}\\PY{p}{)}\\PY{p}{,}\n \\PY{n}{nb\\PYZus{}epoch}\\PY{o}{=}\\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}num\\PYZus{}iter}\\PY{p}{,}\n \\PY{n}{samples\\PYZus{}per\\PYZus{}epoch}\\PY{o}{=}\\PY{n}{S}\\PY{o}{.}\\PY{n}{nonzero}\\PY{p}{(}\\PY{p}{)}\\PY{p}{[}\\PY{l+m+mi}{0}\\PY{p}{]}\\PY{o}{.}\\PY{n}{shape}\\PY{p}{[}\\PY{l+m+mi}{0}\\PY{p}{]} \\PY{o}{/}\\PY{o}{/} \\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}n\\PYZus{}batch}\\PY{p}{,}\n \\PY{n}{verbose}\\PY{o}{=}\\PY{l+m+mi}{1}\n \\PY{p}{)}\n \\PY{c+c1}{\\PYZsh{} Get embedding for all points}\n \\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}Y} \\PY{o}{=} \\PY{n}{model\\PYZus{}batch\\PYZus{}predictor}\\PY{p}{(}\\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}autoencoder}\\PY{p}{,} \\PY{n}{S}\\PY{p}{,} \\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}n\\PYZus{}batch}\\PY{p}{)}\n \\PY{n}{t2} \\PY{o}{=} \\PY{n}{time}\\PY{p}{(}\\PY{p}{)}\n \\PY{c+c1}{\\PYZsh{} Save the autoencoder and its weights}\n \\PY{k}{if}\\PY{p}{(}\\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}weightfile} \\PY{o+ow}{is} \\PY{o+ow}{not} \\PY{k+kc}{None}\\PY{p}{)}\\PY{p}{:}\n \\PY{n}{saveweights}\\PY{p}{(}\\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}encoder}\\PY{p}{,} \\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}weightfile}\\PY{p}{[}\\PY{l+m+mi}{0}\\PY{p}{]}\\PY{p}{)}\n \\PY{n}{saveweights}\\PY{p}{(}\\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}decoder}\\PY{p}{,} \\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}weightfile}\\PY{p}{[}\\PY{l+m+mi}{1}\\PY{p}{]}\\PY{p}{)}\n \\PY{k}{if}\\PY{p}{(}\\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}modelfile} \\PY{o+ow}{is} \\PY{o+ow}{not} \\PY{k+kc}{None}\\PY{p}{)}\\PY{p}{:}\n \\PY{n}{savemodel}\\PY{p}{(}\\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}encoder}\\PY{p}{,} \\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}modelfile}\\PY{p}{[}\\PY{l+m+mi}{0}\\PY{p}{]}\\PY{p}{)}\n \\PY{n}{savemodel}\\PY{p}{(}\\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}decoder}\\PY{p}{,} \\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}modelfile}\\PY{p}{[}\\PY{l+m+mi}{1}\\PY{p}{]}\\PY{p}{)}\n \\PY{k}{if}\\PY{p}{(}\\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}savefilesuffix} \\PY{o+ow}{is} \\PY{o+ow}{not} 
\\PY{k+kc}{None}\\PY{p}{)}\\PY{p}{:}\n \\PY{n}{saveweights}\\PY{p}{(}\n \\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}encoder}\\PY{p}{,}\n \\PY{l+s+s1}{\\PYZsq{}}\\PY{l+s+s1}{encoder\\PYZus{}weights\\PYZus{}}\\PY{l+s+s1}{\\PYZsq{}} \\PY{o}{+} \\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}savefilesuffix} \\PY{o}{+} \\PY{l+s+s1}{\\PYZsq{}}\\PY{l+s+s1}{.hdf5}\\PY{l+s+s1}{\\PYZsq{}}\n \\PY{p}{)}\n \\PY{n}{saveweights}\\PY{p}{(}\n \\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}decoder}\\PY{p}{,}\n \\PY{l+s+s1}{\\PYZsq{}}\\PY{l+s+s1}{decoder\\PYZus{}weights\\PYZus{}}\\PY{l+s+s1}{\\PYZsq{}} \\PY{o}{+} \\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}savefilesuffix} \\PY{o}{+} \\PY{l+s+s1}{\\PYZsq{}}\\PY{l+s+s1}{.hdf5}\\PY{l+s+s1}{\\PYZsq{}}\n \\PY{p}{)}\n \\PY{n}{savemodel}\\PY{p}{(}\n \\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}encoder}\\PY{p}{,}\n \\PY{l+s+s1}{\\PYZsq{}}\\PY{l+s+s1}{encoder\\PYZus{}model\\PYZus{}}\\PY{l+s+s1}{\\PYZsq{}} \\PY{o}{+} \\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}savefilesuffix} \\PY{o}{+} \\PY{l+s+s1}{\\PYZsq{}}\\PY{l+s+s1}{.json}\\PY{l+s+s1}{\\PYZsq{}}\n \\PY{p}{)}\n \\PY{n}{savemodel}\\PY{p}{(}\n \\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}decoder}\\PY{p}{,}\n \\PY{l+s+s1}{\\PYZsq{}}\\PY{l+s+s1}{decoder\\PYZus{}model\\PYZus{}}\\PY{l+s+s1}{\\PYZsq{}} \\PY{o}{+} \\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}savefilesuffix} \\PY{o}{+} \\PY{l+s+s1}{\\PYZsq{}}\\PY{l+s+s1}{.json}\\PY{l+s+s1}{\\PYZsq{}}\n \\PY{p}{)}\n \\PY{c+c1}{\\PYZsh{} Save the embedding}\n \\PY{n}{np}\\PY{o}{.}\\PY{n}{savetxt}\\PY{p}{(}\\PY{l+s+s1}{\\PYZsq{}}\\PY{l+s+s1}{embedding\\PYZus{}}\\PY{l+s+s1}{\\PYZsq{}} \\PY{o}{+} \\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}savefilesuffix} \\PY{o}{+} \\PY{l+s+s1}{\\PYZsq{}}\\PY{l+s+s1}{.txt}\\PY{l+s+s1}{\\PYZsq{}}\\PY{p}{,} \\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}Y}\\PY{p}{)}\n \\PY{k}{return} \\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}Y}\\PY{p}{,} \\PY{p}{(}\\PY{n}{t2} \\PY{o}{\\PYZhy{}} \\PY{n}{t1}\\PY{p}{)}\n\n \\PY{k}{def} \\PY{n+nf}{get\\PYZus{}embedding}\\PY{p}{(}\\PY{n+nb+bp}{self}\\PY{p}{,} \\PY{n}{filesuffix}\\PY{o}{=}\\PY{k+kc}{None}\\PY{p}{)}\\PY{p}{:}\n \\PY{k}{return} \\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}Y} \\PY{k}{if} \\PY{n}{filesuffix} \\PY{o+ow}{is} \\PY{k+kc}{None} \\PY{k}{else} \\PY{n}{np}\\PY{o}{.}\\PY{n}{loadtxt}\\PY{p}{(}\n \\PY{l+s+s1}{\\PYZsq{}}\\PY{l+s+s1}{embedding\\PYZus{}}\\PY{l+s+s1}{\\PYZsq{}} \\PY{o}{+} \\PY{n}{filesuffix} \\PY{o}{+} \\PY{l+s+s1}{\\PYZsq{}}\\PY{l+s+s1}{.txt}\\PY{l+s+s1}{\\PYZsq{}}\n \\PY{p}{)}\n\n \\PY{k}{def} \\PY{n+nf}{get\\PYZus{}edge\\PYZus{}weight}\\PY{p}{(}\\PY{n+nb+bp}{self}\\PY{p}{,} \\PY{n}{i}\\PY{p}{,} \\PY{n}{j}\\PY{p}{,} \\PY{n}{embed}\\PY{o}{=}\\PY{k+kc}{None}\\PY{p}{,} \\PY{n}{filesuffix}\\PY{o}{=}\\PY{k+kc}{None}\\PY{p}{)}\\PY{p}{:}\n \\PY{k}{if} \\PY{n}{embed} \\PY{o+ow}{is} \\PY{k+kc}{None}\\PY{p}{:}\n \\PY{k}{if} \\PY{n}{filesuffix} \\PY{o+ow}{is} \\PY{k+kc}{None}\\PY{p}{:}\n \\PY{n}{embed} \\PY{o}{=} \\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}Y}\n \\PY{k}{else}\\PY{p}{:}\n \\PY{n}{embed} \\PY{o}{=} \\PY{n}{np}\\PY{o}{.}\\PY{n}{loadtxt}\\PY{p}{(}\\PY{l+s+s1}{\\PYZsq{}}\\PY{l+s+s1}{embedding\\PYZus{}}\\PY{l+s+s1}{\\PYZsq{}} \\PY{o}{+} \\PY{n}{filesuffix} \\PY{o}{+} \\PY{l+s+s1}{\\PYZsq{}}\\PY{l+s+s1}{.txt}\\PY{l+s+s1}{\\PYZsq{}}\\PY{p}{)}\n \\PY{k}{if} \\PY{n}{i} \\PY{o}{==} \\PY{n}{j}\\PY{p}{:}\n \\PY{k}{return} \\PY{l+m+mi}{0}\n \\PY{k}{else}\\PY{p}{:}\n \\PY{n}{S\\PYZus{}hat} \\PY{o}{=} 
\\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{get\\PYZus{}reconst\\PYZus{}from\\PYZus{}embed}\\PY{p}{(}\\PY{n}{embed}\\PY{p}{[}\\PY{p}{(}\\PY{n}{i}\\PY{p}{,} \\PY{n}{j}\\PY{p}{)}\\PY{p}{,} \\PY{p}{:}\\PY{p}{]}\\PY{p}{,} \\PY{n}{filesuffix}\\PY{p}{)}\n \\PY{k}{return} \\PY{p}{(}\\PY{n}{S\\PYZus{}hat}\\PY{p}{[}\\PY{n}{i}\\PY{p}{,} \\PY{n}{j}\\PY{p}{]} \\PY{o}{+} \\PY{n}{S\\PYZus{}hat}\\PY{p}{[}\\PY{n}{j}\\PY{p}{,} \\PY{n}{i}\\PY{p}{]}\\PY{p}{)} \\PY{o}{/} \\PY{l+m+mi}{2}\n\n \\PY{k}{def} \\PY{n+nf}{get\\PYZus{}reconstructed\\PYZus{}adj}\\PY{p}{(}\\PY{n+nb+bp}{self}\\PY{p}{,} \\PY{n}{embed}\\PY{o}{=}\\PY{k+kc}{None}\\PY{p}{,} \\PY{n}{node\\PYZus{}l}\\PY{o}{=}\\PY{k+kc}{None}\\PY{p}{,} \\PY{n}{filesuffix}\\PY{o}{=}\\PY{k+kc}{None}\\PY{p}{)}\\PY{p}{:}\n \\PY{k}{if} \\PY{n}{embed} \\PY{o+ow}{is} \\PY{k+kc}{None}\\PY{p}{:}\n \\PY{k}{if} \\PY{n}{filesuffix} \\PY{o+ow}{is} \\PY{k+kc}{None}\\PY{p}{:}\n \\PY{n}{embed} \\PY{o}{=} \\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}Y}\n \\PY{k}{else}\\PY{p}{:}\n \\PY{n}{embed} \\PY{o}{=} \\PY{n}{np}\\PY{o}{.}\\PY{n}{loadtxt}\\PY{p}{(}\\PY{l+s+s1}{\\PYZsq{}}\\PY{l+s+s1}{embedding\\PYZus{}}\\PY{l+s+s1}{\\PYZsq{}} \\PY{o}{+} \\PY{n}{filesuffix} \\PY{o}{+} \\PY{l+s+s1}{\\PYZsq{}}\\PY{l+s+s1}{.txt}\\PY{l+s+s1}{\\PYZsq{}}\\PY{p}{)}\n \\PY{n}{S\\PYZus{}hat} \\PY{o}{=} \\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{get\\PYZus{}reconst\\PYZus{}from\\PYZus{}embed}\\PY{p}{(}\\PY{n}{embed}\\PY{p}{,} \\PY{n}{node\\PYZus{}l}\\PY{p}{,} \\PY{n}{filesuffix}\\PY{p}{)}\n \\PY{k}{return} \\PY{n}{graphify}\\PY{p}{(}\\PY{n}{S\\PYZus{}hat}\\PY{p}{)}\n\n \\PY{k}{def} \\PY{n+nf}{get\\PYZus{}reconst\\PYZus{}from\\PYZus{}embed}\\PY{p}{(}\\PY{n+nb+bp}{self}\\PY{p}{,} \\PY{n}{embed}\\PY{p}{,} \\PY{n}{node\\PYZus{}l}\\PY{o}{=}\\PY{k+kc}{None}\\PY{p}{,} \\PY{n}{filesuffix}\\PY{o}{=}\\PY{k+kc}{None}\\PY{p}{)}\\PY{p}{:}\n \\PY{k}{if} \\PY{n}{filesuffix} \\PY{o+ow}{is} \\PY{k+kc}{None}\\PY{p}{:}\n \\PY{k}{if} \\PY{n}{node\\PYZus{}l} \\PY{o+ow}{is} \\PY{o+ow}{not} \\PY{k+kc}{None}\\PY{p}{:}\n \\PY{k}{return} \\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}decoder}\\PY{o}{.}\\PY{n}{predict}\\PY{p}{(}\n \\PY{n}{embed}\\PY{p}{,}\n \\PY{n}{batch\\PYZus{}size}\\PY{o}{=}\\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}n\\PYZus{}batch}\\PY{p}{)}\\PY{p}{[}\\PY{p}{:}\\PY{p}{,} \\PY{n}{node\\PYZus{}l}\\PY{p}{]}\n \\PY{k}{else}\\PY{p}{:}\n \\PY{k}{return} \\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}decoder}\\PY{o}{.}\\PY{n}{predict}\\PY{p}{(}\\PY{n}{embed}\\PY{p}{,} \\PY{n}{batch\\PYZus{}size}\\PY{o}{=}\\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}n\\PYZus{}batch}\\PY{p}{)}\n \\PY{k}{else}\\PY{p}{:}\n \\PY{k}{try}\\PY{p}{:}\n \\PY{n}{decoder} \\PY{o}{=} \\PY{n}{model\\PYZus{}from\\PYZus{}json}\\PY{p}{(}\n \\PY{n+nb}{open}\\PY{p}{(}\\PY{l+s+s1}{\\PYZsq{}}\\PY{l+s+s1}{decoder\\PYZus{}model\\PYZus{}}\\PY{l+s+s1}{\\PYZsq{}} \\PY{o}{+} \\PY{n}{filesuffix} \\PY{o}{+} \\PY{l+s+s1}{\\PYZsq{}}\\PY{l+s+s1}{.json}\\PY{l+s+s1}{\\PYZsq{}}\\PY{p}{)}\\PY{o}{.}\\PY{n}{read}\\PY{p}{(}\\PY{p}{)}\n \\PY{p}{)}\n \\PY{k}{except}\\PY{p}{:}\n \\PY{n+nb}{print}\\PY{p}{(}\\PY{l+s+s1}{\\PYZsq{}}\\PY{l+s+s1}{Error reading file: }\\PY{l+s+si}{\\PYZob{}0\\PYZcb{}}\\PY{l+s+s1}{. 
Cannot load previous model}\\PY{l+s+s1}{\\PYZsq{}}\\PY{o}{.}\\PY{n}{format}\\PY{p}{(}\\PY{l+s+s1}{\\PYZsq{}}\\PY{l+s+s1}{decoder\\PYZus{}model\\PYZus{}}\\PY{l+s+s1}{\\PYZsq{}}\\PY{o}{+}\\PY{n}{filesuffix}\\PY{o}{+}\\PY{l+s+s1}{\\PYZsq{}}\\PY{l+s+s1}{.json}\\PY{l+s+s1}{\\PYZsq{}}\\PY{p}{)}\\PY{p}{)}\n \\PY{n}{exit}\\PY{p}{(}\\PY{p}{)}\n \\PY{k}{try}\\PY{p}{:}\n \\PY{n}{decoder}\\PY{o}{.}\\PY{n}{load\\PYZus{}weights}\\PY{p}{(}\\PY{l+s+s1}{\\PYZsq{}}\\PY{l+s+s1}{decoder\\PYZus{}weights\\PYZus{}}\\PY{l+s+s1}{\\PYZsq{}} \\PY{o}{+} \\PY{n}{filesuffix} \\PY{o}{+} \\PY{l+s+s1}{\\PYZsq{}}\\PY{l+s+s1}{.hdf5}\\PY{l+s+s1}{\\PYZsq{}}\\PY{p}{)}\n \\PY{k}{except}\\PY{p}{:}\n \\PY{n+nb}{print}\\PY{p}{(}\\PY{l+s+s1}{\\PYZsq{}}\\PY{l+s+s1}{Error reading file: }\\PY{l+s+si}{\\PYZob{}0\\PYZcb{}}\\PY{l+s+s1}{. Cannot load previous weights}\\PY{l+s+s1}{\\PYZsq{}}\\PY{o}{.}\\PY{n}{format}\\PY{p}{(}\\PY{l+s+s1}{\\PYZsq{}}\\PY{l+s+s1}{decoder\\PYZus{}weights\\PYZus{}}\\PY{l+s+s1}{\\PYZsq{}}\\PY{o}{+}\\PY{n}{filesuffix}\\PY{o}{+}\\PY{l+s+s1}{\\PYZsq{}}\\PY{l+s+s1}{.hdf5}\\PY{l+s+s1}{\\PYZsq{}}\\PY{p}{)}\\PY{p}{)}\n \\PY{n}{exit}\\PY{p}{(}\\PY{p}{)}\n \\PY{k}{if} \\PY{n}{node\\PYZus{}l} \\PY{o+ow}{is} \\PY{o+ow}{not} \\PY{k+kc}{None}\\PY{p}{:}\n \\PY{k}{return} \\PY{n}{decoder}\\PY{o}{.}\\PY{n}{predict}\\PY{p}{(}\\PY{n}{embed}\\PY{p}{,} \\PY{n}{batch\\PYZus{}size}\\PY{o}{=}\\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}n\\PYZus{}batch}\\PY{p}{)}\\PY{p}{[}\\PY{p}{:}\\PY{p}{,} \\PY{n}{node\\PYZus{}l}\\PY{p}{]}\n \\PY{k}{else}\\PY{p}{:}\n \\PY{k}{return} \\PY{n}{decoder}\\PY{o}{.}\\PY{n}{predict}\\PY{p}{(}\\PY{n}{embed}\\PY{p}{,} \\PY{n}{batch\\PYZus{}size}\\PY{o}{=}\\PY{n+nb+bp}{self}\\PY{o}{.}\\PY{n}{\\PYZus{}n\\PYZus{}batch}\\PY{p}{)}\n\\end{Verbatim}\n","text/html":["
class SDNE(StaticGraphEmbedding):\n","\n","    def __init__(self, *hyper_dict, **kwargs):\n","        ''' Initialize the SDNE class\n","\n","        Args:\n","            d: dimension of the embedding\n","            beta: penalty parameter in matrix B of 2nd order objective\n","            alpha: weighing hyperparameter for 1st order objective\n","            nu1: L1-reg hyperparameter\n","            nu2: L2-reg hyperparameter\n","            K: number of hidden layers in encoder/decoder\n","            n_units: vector of length K-1 containing #units in hidden layers\n","                     of encoder/decoder, not including the units in the\n","                     embedding layer\n","            rho: bounding ratio for number of units in consecutive layers (< 1)\n","            n_iter: number of sgd iterations for first embedding (const)\n","            xeta: sgd step size parameter\n","            n_batch: minibatch size for SGD\n","            modelfile: Files containing previous encoder and decoder models\n","            weightfile: Files containing previous encoder and decoder weights\n","        '''\n","        hyper_params = {\n","            'method_name': 'sdne',\n","            'actfn': 'relu',\n","            'modelfile': None,\n","            'weightfile': None,\n","            'savefilesuffix': None\n","\n","        }\n","        hyper_params.update(kwargs)\n","        for key in hyper_params.keys():\n","            self.__setattr__('_%s' % key, hyper_params[key])\n","        for dictionary in hyper_dict:\n","            for key in dictionary:\n","                self.__setattr__('_%s' % key, dictionary[key])\n","\n","    def get_method_name(self):\n","        return self._method_name\n","\n","    def get_method_summary(self):\n","        return '%s_%d' % (self._method_name, self._d)\n","\n","    def learn_embedding(self, graph=None, edge_f=None,\n","                        is_weighted=False, no_python=False):\n","        if not graph and not edge_f:\n","            raise Exception('graph/edge_f needed')\n","        if not graph:\n","            graph = graph_util.loadGraphFromEdgeListTxt(edge_f)\n","        S = nx.to_scipy_sparse_matrix(graph)\n","        t1 = time()\n","        S = (S + S.T) / 2\n","        self._node_num = len(graph.nodes)\n","\n","        # Generate encoder, decoder and autoencoder\n","        self._num_iter = self._n_iter\n","        # If cannot use previous step information, initialize new models\n","        self._encoder = get_encoder(self._node_num, self._d,\n","                                    self._K, self._n_units,\n","                                    self._nu1, self._nu2,\n","                                    self._actfn)\n","        self._decoder = get_decoder(self._node_num, self._d,\n","                                    self._K, self._n_units,\n","                                    self._nu1, self._nu2,\n","                                    self._actfn)\n","        self._autoencoder = get_autoencoder(self._encoder, self._decoder)\n","\n","        # Initialize self._model\n","        # Input\n","        x_in = Input(shape=(2 * self._node_num,), name='x_in')\n","        x1 = Lambda(\n","            lambda x: x[:, 0:self._node_num],\n","            output_shape=(self._node_num,)\n","        )(x_in)\n","        x2 = Lambda(\n","            lambda x: x[:, self._node_num:2 * self._node_num],\n","            output_shape=(self._node_num,)\n","        )(x_in)\n","        # Process inputs\n","        [x_hat1, y1] = 
self._autoencoder(x1)\n","        [x_hat2, y2] = self._autoencoder(x2)\n","        # Outputs\n","        x_diff1 = merge([x_hat1, x1],\n","                        mode=lambda ab: ab[0] - ab[1],\n","                        output_shape=lambda L: L[1])\n","        x_diff2 = merge([x_hat2, x2],\n","                        mode=lambda ab: ab[0] - ab[1],\n","                        output_shape=lambda L: L[1])\n","        y_diff = merge([y2, y1],\n","                       mode=lambda ab: ab[0] - ab[1],\n","                       output_shape=lambda L: L[1])\n","\n","        # Objectives\n","        def weighted_mse_x(y_true, y_pred):\n","            ''' Hack: This fn doesn't accept additional arguments.\n","                      We use y_true to pass them.\n","                y_pred: Contains x_hat - x\n","                y_true: Contains [b, deg]\n","            '''\n","            return KBack.sum(\n","                KBack.square(y_pred * y_true[:, 0:self._node_num]),\n","                axis=-1) / y_true[:, self._node_num]\n","\n","        def weighted_mse_y(y_true, y_pred):\n","            ''' Hack: This fn doesn't accept additional arguments.\n","                      We use y_true to pass them.\n","            y_pred: Contains y2 - y1\n","            y_true: Contains s12\n","            '''\n","            min_batch_size = KBack.shape(y_true)[0]\n","            return KBack.reshape(\n","                KBack.sum(KBack.square(y_pred), axis=-1),\n","                [min_batch_size, 1]\n","            ) * y_true\n","\n","        # Model\n","        self._model = Model(input=x_in, output=[x_diff1, x_diff2, y_diff])\n","        sgd = SGD(lr=self._xeta, decay=1e-5, momentum=0.99, nesterov=True)\n","        # adam = Adam(lr=self._xeta, beta_1=0.9, beta_2=0.999, epsilon=1e-08)\n","        self._model.compile(\n","            optimizer=sgd,\n","            loss=[weighted_mse_x, weighted_mse_x, weighted_mse_y],\n","            loss_weights=[1, 1, self._alpha]\n","        )\n","\n","        self._model.fit_generator(\n","            generator=batch_generator_sdne(S, self._beta, self._n_batch, True),\n","            nb_epoch=self._num_iter,\n","            samples_per_epoch=S.nonzero()[0].shape[0] // self._n_batch,\n","            verbose=1\n","        )\n","        # Get embedding for all points\n","        self._Y = model_batch_predictor(self._autoencoder, S, self._n_batch)\n","        t2 = time()\n","        # Save the autoencoder and its weights\n","        if(self._weightfile is not None):\n","            saveweights(self._encoder, self._weightfile[0])\n","            saveweights(self._decoder, self._weightfile[1])\n","        if(self._modelfile is not None):\n","            savemodel(self._encoder, self._modelfile[0])\n","            savemodel(self._decoder, self._modelfile[1])\n","        if(self._savefilesuffix is not None):\n","            saveweights(\n","                self._encoder,\n","                'encoder_weights_' + self._savefilesuffix + '.hdf5'\n","            )\n","            saveweights(\n","                self._decoder,\n","                'decoder_weights_' + self._savefilesuffix + '.hdf5'\n","            )\n","            savemodel(\n","                self._encoder,\n","                'encoder_model_' + self._savefilesuffix + '.json'\n","            )\n","            savemodel(\n","                self._decoder,\n","                'decoder_model_' + self._savefilesuffix + '.json'\n","            )\n","            # Save the embedding\n","            
np.savetxt('embedding_' + self._savefilesuffix + '.txt', self._Y)\n","        return self._Y, (t2 - t1)\n","\n","    def get_embedding(self, filesuffix=None):\n","        return self._Y if filesuffix is None else np.loadtxt(\n","            'embedding_' + filesuffix + '.txt'\n","        )\n","\n","    def get_edge_weight(self, i, j, embed=None, filesuffix=None):\n","        if embed is None:\n","            if filesuffix is None:\n","                embed = self._Y\n","            else:\n","                embed = np.loadtxt('embedding_' + filesuffix + '.txt')\n","        if i == j:\n","            return 0\n","        else:\n","            S_hat = self.get_reconst_from_embed(embed[(i, j), :], filesuffix)\n","            return (S_hat[i, j] + S_hat[j, i]) / 2\n","\n","    def get_reconstructed_adj(self, embed=None, node_l=None, filesuffix=None):\n","        if embed is None:\n","            if filesuffix is None:\n","                embed = self._Y\n","            else:\n","                embed = np.loadtxt('embedding_' + filesuffix + '.txt')\n","        S_hat = self.get_reconst_from_embed(embed, node_l, filesuffix)\n","        return graphify(S_hat)\n","\n","    def get_reconst_from_embed(self, embed, node_l=None, filesuffix=None):\n","        if filesuffix is None:\n","            if node_l is not None:\n","                return self._decoder.predict(\n","                    embed,\n","                    batch_size=self._n_batch)[:, node_l]\n","            else:\n","                return self._decoder.predict(embed, batch_size=self._n_batch)\n","        else:\n","            try:\n","                decoder = model_from_json(\n","                    open('decoder_model_' + filesuffix + '.json').read()\n","                )\n","            except:\n","                print('Error reading file: {0}. Cannot load previous model'.format('decoder_model_'+filesuffix+'.json'))\n","                exit()\n","            try:\n","                decoder.load_weights('decoder_weights_' + filesuffix + '.hdf5')\n","            except:\n","                print('Error reading file: {0}. Cannot load previous weights'.format('decoder_weights_'+filesuffix+'.hdf5'))\n","                exit()\n","            if node_l is not None:\n","                return decoder.predict(embed, batch_size=self._n_batch)[:, node_l]\n","            else:\n","                return decoder.predict(embed, batch_size=self._n_batch)\n","
\n"],"text/plain":["class SDNE(StaticGraphEmbedding):\n","\n"," def __init__(self, *hyper_dict, **kwargs):\n"," ''' Initialize the SDNE class\n","\n"," Args:\n"," d: dimension of the embedding\n"," beta: penalty parameter in matrix B of 2nd order objective\n"," alpha: weighing hyperparameter for 1st order objective\n"," nu1: L1-reg hyperparameter\n"," nu2: L2-reg hyperparameter\n"," K: number of hidden layers in encoder/decoder\n"," n_units: vector of length K-1 containing #units in hidden layers\n"," of encoder/decoder, not including the units in the\n"," embedding layer\n"," rho: bounding ratio for number of units in consecutive layers (< 1)\n"," n_iter: number of sgd iterations for first embedding (const)\n"," xeta: sgd step size parameter\n"," n_batch: minibatch size for SGD\n"," modelfile: Files containing previous encoder and decoder models\n"," weightfile: Files containing previous encoder and decoder weights\n"," '''\n"," hyper_params = {\n"," 'method_name': 'sdne',\n"," 'actfn': 'relu',\n"," 'modelfile': None,\n"," 'weightfile': None,\n"," 'savefilesuffix': None\n","\n"," }\n"," hyper_params.update(kwargs)\n"," for key in hyper_params.keys():\n"," self.__setattr__('_%s' % key, hyper_params[key])\n"," for dictionary in hyper_dict:\n"," for key in dictionary:\n"," self.__setattr__('_%s' % key, dictionary[key])\n","\n"," def get_method_name(self):\n"," return self._method_name\n","\n"," def get_method_summary(self):\n"," return '%s_%d' % (self._method_name, self._d)\n","\n"," def learn_embedding(self, graph=None, edge_f=None,\n"," is_weighted=False, no_python=False):\n"," if not graph and not edge_f:\n"," raise Exception('graph/edge_f needed')\n"," if not graph:\n"," graph = graph_util.loadGraphFromEdgeListTxt(edge_f)\n"," S = nx.to_scipy_sparse_matrix(graph)\n"," t1 = time()\n"," S = (S + S.T) / 2\n"," self._node_num = len(graph.nodes)\n","\n"," # Generate encoder, decoder and autoencoder\n"," self._num_iter = self._n_iter\n"," # If cannot use previous step information, initialize new models\n"," self._encoder = get_encoder(self._node_num, self._d,\n"," self._K, self._n_units,\n"," self._nu1, self._nu2,\n"," self._actfn)\n"," self._decoder = get_decoder(self._node_num, self._d,\n"," self._K, self._n_units,\n"," self._nu1, self._nu2,\n"," self._actfn)\n"," self._autoencoder = get_autoencoder(self._encoder, self._decoder)\n","\n"," # Initialize self._model\n"," # Input\n"," x_in = Input(shape=(2 * self._node_num,), name='x_in')\n"," x1 = Lambda(\n"," lambda x: x[:, 0:self._node_num],\n"," output_shape=(self._node_num,)\n"," )(x_in)\n"," x2 = Lambda(\n"," lambda x: x[:, self._node_num:2 * self._node_num],\n"," output_shape=(self._node_num,)\n"," )(x_in)\n"," # Process inputs\n"," [x_hat1, y1] = self._autoencoder(x1)\n"," [x_hat2, y2] = self._autoencoder(x2)\n"," # Outputs\n"," x_diff1 = merge([x_hat1, x1],\n"," mode=lambda ab: ab[0] - ab[1],\n"," output_shape=lambda L: L[1])\n"," x_diff2 = merge([x_hat2, x2],\n"," mode=lambda ab: ab[0] - ab[1],\n"," output_shape=lambda L: L[1])\n"," y_diff = merge([y2, y1],\n"," mode=lambda ab: ab[0] - ab[1],\n"," output_shape=lambda L: L[1])\n","\n"," # Objectives\n"," def weighted_mse_x(y_true, y_pred):\n"," ''' Hack: This fn doesn't accept additional arguments.\n"," We use y_true to pass them.\n"," y_pred: Contains x_hat - x\n"," y_true: Contains [b, deg]\n"," '''\n"," return KBack.sum(\n"," KBack.square(y_pred * y_true[:, 0:self._node_num]),\n"," axis=-1) / y_true[:, self._node_num]\n","\n"," def weighted_mse_y(y_true, y_pred):\n"," ''' Hack: This 
fn doesn't accept additional arguments.\n"," We use y_true to pass them.\n"," y_pred: Contains y2 - y1\n"," y_true: Contains s12\n"," '''\n"," min_batch_size = KBack.shape(y_true)[0]\n"," return KBack.reshape(\n"," KBack.sum(KBack.square(y_pred), axis=-1),\n"," [min_batch_size, 1]\n"," ) * y_true\n","\n"," # Model\n"," self._model = Model(input=x_in, output=[x_diff1, x_diff2, y_diff])\n"," sgd = SGD(lr=self._xeta, decay=1e-5, momentum=0.99, nesterov=True)\n"," # adam = Adam(lr=self._xeta, beta_1=0.9, beta_2=0.999, epsilon=1e-08)\n"," self._model.compile(\n"," optimizer=sgd,\n"," loss=[weighted_mse_x, weighted_mse_x, weighted_mse_y],\n"," loss_weights=[1, 1, self._alpha]\n"," )\n","\n"," self._model.fit_generator(\n"," generator=batch_generator_sdne(S, self._beta, self._n_batch, True),\n"," nb_epoch=self._num_iter,\n"," samples_per_epoch=S.nonzero()[0].shape[0] // self._n_batch,\n"," verbose=1\n"," )\n"," # Get embedding for all points\n"," self._Y = model_batch_predictor(self._autoencoder, S, self._n_batch)\n"," t2 = time()\n"," # Save the autoencoder and its weights\n"," if(self._weightfile is not None):\n"," saveweights(self._encoder, self._weightfile[0])\n"," saveweights(self._decoder, self._weightfile[1])\n"," if(self._modelfile is not None):\n"," savemodel(self._encoder, self._modelfile[0])\n"," savemodel(self._decoder, self._modelfile[1])\n"," if(self._savefilesuffix is not None):\n"," saveweights(\n"," self._encoder,\n"," 'encoder_weights_' + self._savefilesuffix + '.hdf5'\n"," )\n"," saveweights(\n"," self._decoder,\n"," 'decoder_weights_' + self._savefilesuffix + '.hdf5'\n"," )\n"," savemodel(\n"," self._encoder,\n"," 'encoder_model_' + self._savefilesuffix + '.json'\n"," )\n"," savemodel(\n"," self._decoder,\n"," 'decoder_model_' + self._savefilesuffix + '.json'\n"," )\n"," # Save the embedding\n"," np.savetxt('embedding_' + self._savefilesuffix + '.txt', self._Y)\n"," return self._Y, (t2 - t1)\n","\n"," def get_embedding(self, filesuffix=None):\n"," return self._Y if filesuffix is None else np.loadtxt(\n"," 'embedding_' + filesuffix + '.txt'\n"," )\n","\n"," def get_edge_weight(self, i, j, embed=None, filesuffix=None):\n"," if embed is None:\n"," if filesuffix is None:\n"," embed = self._Y\n"," else:\n"," embed = np.loadtxt('embedding_' + filesuffix + '.txt')\n"," if i == j:\n"," return 0\n"," else:\n"," S_hat = self.get_reconst_from_embed(embed[(i, j), :], filesuffix)\n"," return (S_hat[i, j] + S_hat[j, i]) / 2\n","\n"," def get_reconstructed_adj(self, embed=None, node_l=None, filesuffix=None):\n"," if embed is None:\n"," if filesuffix is None:\n"," embed = self._Y\n"," else:\n"," embed = np.loadtxt('embedding_' + filesuffix + '.txt')\n"," S_hat = self.get_reconst_from_embed(embed, node_l, filesuffix)\n"," return graphify(S_hat)\n","\n"," def get_reconst_from_embed(self, embed, node_l=None, filesuffix=None):\n"," if filesuffix is None:\n"," if node_l is not None:\n"," return self._decoder.predict(\n"," embed,\n"," batch_size=self._n_batch)[:, node_l]\n"," else:\n"," return self._decoder.predict(embed, batch_size=self._n_batch)\n"," else:\n"," try:\n"," decoder = model_from_json(\n"," open('decoder_model_' + filesuffix + '.json').read()\n"," )\n"," except:\n"," print('Error reading file: {0}. Cannot load previous model'.format('decoder_model_'+filesuffix+'.json'))\n"," exit()\n"," try:\n"," decoder.load_weights('decoder_weights_' + filesuffix + '.hdf5')\n"," except:\n"," print('Error reading file: {0}. 
Cannot load previous weights'.format('decoder_weights_'+filesuffix+'.hdf5'))\n"," exit()\n"," if node_l is not None:\n"," return decoder.predict(embed, batch_size=self._n_batch)[:, node_l]\n"," else:\n"," return decoder.predict(embed, batch_size=self._n_batch)"]},"metadata":{"tags":[]},"execution_count":5}]},{"cell_type":"code","metadata":{"id":"Ho9iK2TEQWYi"},"source":["graph = nx.karate_club_graph()\n","\n","m1 = SDNE(d=2, beta=5, alpha=1e-5, nu1=1e-6, nu2=1e-6, K=3,n_units=[50, 15,], rho=0.3, n_iter=50, \n"," xeta=0.01,n_batch=100,\n"," modelfile=['enc_model.json', 'dec_model.json'],\n"," weightfile=['enc_weights.hdf5', 'dec_weights.hdf5'])"],"execution_count":null,"outputs":[]},{"cell_type":"code","metadata":{"id":"A5Mq5ReCRkTG"},"source":["m1.learn_embedding(graph)\n","\n","x, y = list(zip(*m1.get_embedding()))"],"execution_count":null,"outputs":[]},{"cell_type":"code","metadata":{"id":"3CNTNS2qRarA"},"source":["plt.plot(x, y, 'o',linewidth=None)"],"execution_count":null,"outputs":[]}]}
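,{"cell_type":"markdown","metadata":{},"source":["The scatter above shows only raw coordinates. As a quick sanity check (not part of GEM itself), the sketch below colours each node by its `club` attribute from `nx.karate_club_graph()` to see whether the two factions land in different regions of the embedding space. It assumes the cells above have already been run, so `graph`, `m1` and `plt` are available."]},{"cell_type":"code","metadata":{},"source":["import numpy as np\n","\n","# 2-D embedding as an array of shape (n_nodes, 2); assumes m1 was fit above\n","emb = np.asarray(m1.get_embedding())\n","\n","# Colour nodes by the faction stored on the karate club graph\n","colors = ['tab:blue' if graph.nodes[n]['club'] == 'Mr. Hi' else 'tab:orange'\n","          for n in graph.nodes()]\n","\n","plt.scatter(emb[:, 0], emb[:, 1], c=colors)\n","plt.title('SDNE embedding of the karate club graph')\n","plt.show()"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{},"source":["SDNE also learns a decoder, so the embedding can be mapped back to full neighbourhood vectors (see `get_reconst_from_embed` in the source listed above). The sketch below, again assuming the earlier cells have run, decodes the stored embedding and compares it with the true adjacency matrix as a rough reconstruction check."]},{"cell_type":"code","metadata":{},"source":["import numpy as np\n","\n","# Decode the learned embedding back into an n x n neighbourhood matrix\n","S_hat = m1.get_reconst_from_embed(np.asarray(m1.get_embedding()))\n","\n","A = nx.to_numpy_array(graph)  # true adjacency matrix of the karate graph\n","print('reconstruction shape:', S_hat.shape)\n","print('mean |S_hat - A|    :', np.abs(S_hat - A).mean())"],"execution_count":null,"outputs":[]}]}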