{ "graphs" : [ { "id" : "http://purl.obolibrary.org/obo/aio.owl", "meta" : { "basicPropertyValues" : [ { "pred" : "http://purl.org/dc/elements/1.1/description", "val" : "This ontology models classes and relationships describing deep learning networks, their component layers and activation functions, as well as potential biases." }, { "pred" : "http://purl.org/dc/elements/1.1/title", "val" : "Artificial Intelligence Ontology" }, { "pred" : "http://purl.org/dc/terms/license", "val" : "http://creativecommons.org/licenses/by/4.0/" }, { "pred" : "http://www.w3.org/2002/07/owl#versionInfo", "val" : "2023-09-08" } ], "version" : "http://purl.obolibrary.org/obo/aio/releases/2023-09-08/aio.owl" }, "nodes" : [ { "id" : "https://w3id.org/aio/AbstractRNNCell", "lbl" : "AbstractRNNCell", "type" : "CLASS", "meta" : { "definition" : { "val" : "Abstract object representing an RNN cell. This is the base class for implementing RNN cells with custom behavior.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/AbstractRNNCell" ] } } }, { "id" : "https://w3id.org/aio/Activation_Layer", "lbl" : "Activation Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Applies an activation function to an output.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/Activation" ] } } }, { "id" : "https://w3id.org/aio/Active_Learning", "lbl" : "Active Learning", "type" : "CLASS", "meta" : { "definition" : { "val" : "Methods which can interactively query a user (or some other information source) to label new data points with the desired outputs.", "xrefs" : [ "https://en.wikipedia.org/wiki/Active_learning_(machine_learning)" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "Query Learning" } ] } }, { "id" : "https://w3id.org/aio/ActivityRegularization_Layer", "lbl" : "ActivityRegularization Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Layer that applies an update to the cost function based input activity.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/ActivityRegularization" ] } } }, { "id" : "https://w3id.org/aio/Activity_Bias", "lbl" : "Activity Bias", "type" : "CLASS", "meta" : { "definition" : { "val" : "A type of selection bias that occurs when systems/platforms get their training data from their most active users, rather than those less active (or inactive).", "xrefs" : [ "https://doi.org/10.6028/NIST.SP.1270" ] } } }, { "id" : "https://w3id.org/aio/AdaptiveAvgPool1D_Layer", "lbl" : "AdaptiveAvgPool1D Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Applies a 1D adaptive average pooling over an input signal composed of several input planes.", "xrefs" : [ "https://pytorch.org/docs/stable/nn.html#pooling-layers" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "AdaptiveAvgPool1D" }, { "pred" : "hasExactSynonym", "val" : "AdaptiveAvgPool1d" } ] } }, { "id" : "https://w3id.org/aio/AdaptiveAvgPool2D_Layer", "lbl" : "AdaptiveAvgPool2D Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Applies a 2D adaptive average pooling over an input signal composed of several input planes.", "xrefs" : [ "https://pytorch.org/docs/stable/nn.html#pooling-layers" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "AdaptiveAvgPool2D" }, { "pred" : "hasExactSynonym", "val" : "AdaptiveAvgPool2d" } ] } }, { "id" : "https://w3id.org/aio/AdaptiveAvgPool3D_Layer", "lbl" : "AdaptiveAvgPool3D Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Applies a 3D adaptive average pooling over an 
input signal composed of several input planes.", "xrefs" : [ "https://pytorch.org/docs/stable/nn.html#pooling-layers" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "AdaptiveAvgPool3D" }, { "pred" : "hasExactSynonym", "val" : "AdaptiveAvgPool3d" } ] } }, { "id" : "https://w3id.org/aio/AdaptiveMaxPool1D_Layer", "lbl" : "AdaptiveMaxPool1D Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Applies a 1D adaptive max pooling over an input signal composed of several input planes.", "xrefs" : [ "https://pytorch.org/docs/stable/nn.html#pooling-layers" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "AdaptiveMaxPool1D" }, { "pred" : "hasExactSynonym", "val" : "AdaptiveMaxPool1d" } ] } }, { "id" : "https://w3id.org/aio/AdaptiveMaxPool2D_Layer", "lbl" : "AdaptiveMaxPool2D Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Applies a 2D adaptive max pooling over an input signal composed of several input planes.", "xrefs" : [ "https://pytorch.org/docs/stable/nn.html#pooling-layers" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "AdaptiveMaxPool2D" }, { "pred" : "hasExactSynonym", "val" : "AdaptiveMaxPool2d" } ] } }, { "id" : "https://w3id.org/aio/AdaptiveMaxPool3D_Layer", "lbl" : "AdaptiveMaxPool3D Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Applies a 3D adaptive max pooling over an input signal composed of several input planes.", "xrefs" : [ "https://pytorch.org/docs/stable/nn.html#pooling-layers" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "AdaptiveMaxPool3D" }, { "pred" : "hasExactSynonym", "val" : "AdaptiveMaxPool3d" } ] } }, { "id" : "https://w3id.org/aio/Add_Layer", "lbl" : "Add Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Layer that adds a list of inputs. It takes as input a list of tensors, all of the same shape, and returns a single tensor (also of the same shape).", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/Add" ] } } }, { "id" : "https://w3id.org/aio/AdditiveAttention_Layer", "lbl" : "AdditiveAttention Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Additive attention layer, a.k.a. Bahdanau-style attention.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/AdditiveAttention" ] } } }, { "id" : "https://w3id.org/aio/AlphaDropout_Layer", "lbl" : "AlphaDropout Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Applies Alpha Dropout to the input. Alpha Dropout is a Dropout that keeps mean and variance of inputs to their original values, in order to ensure the self-normalizing property even after this dropout. Alpha Dropout fits well to Scaled Exponential Linear Units by randomly setting activations to the negative saturation value.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/AlphaDropout" ] } } }, { "id" : "https://w3id.org/aio/Amplification_Bias", "lbl" : "Amplification Bias", "type" : "CLASS", "meta" : { "definition" : { "val" : "Arises when the distribution over prediction outputs is skewed in comparison to the prior distribution of the prediction target.", "xrefs" : [ "https://doi.org/10.6028/NIST.SP.1270" ] } } }, { "id" : "https://w3id.org/aio/Anchoring_Bias", "lbl" : "Anchoring Bias", "type" : "CLASS", "meta" : { "definition" : { "val" : "A cognitive bias, the influence of a particular reference point or anchor on people’s decisions. 
Often more fully referred to as anchoring-and-adjustment, or anchoring-and-adjusting: after an anchor is set, people adjust insufficiently from that anchor point to arrive at a final answer. Decision makers are biased towards an initially presented value.", "xrefs" : [ "https://doi.org/10.6028/NIST.SP.1270" ] } } }, { "id" : "https://w3id.org/aio/Annotator_Reporting_Bias", "lbl" : "Annotator Reporting Bias", "type" : "CLASS", "meta" : { "definition" : { "val" : "When users rely on automation as a heuristic replacement for their own information seeking and processing. A form of individual bias but often discussed as a group bias, or the larger effects on natural language processing models.", "xrefs" : [ "https://doi.org/10.6028/NIST.SP.1270" ] } } }, { "id" : "https://w3id.org/aio/Artificial_Neural_Network", "lbl" : "Artificial Neural Network", "type" : "CLASS", "meta" : { "definition" : { "val" : "An ANN is based on a collection of connected units or nodes called artificial neurons, which loosely model the neurons in a biological brain. Each connection, like the synapses in a biological brain, can transmit a signal to other neurons. An artificial neuron receives a signal then processes it and can signal neurons connected to it. The \"signal\" at a connection is a real number, and the output of each neuron is computed by some non-linear function of the sum of its inputs. The connections are called edges. Neurons and edges typically have a weight that adjusts as Learning proceeds. The weight increases or decreases the strength of the signal at a connection. Neurons may have a threshold such that a signal is sent only if the aggregate signal crosses that threshold. Typically, neurons are aggregated into layers. Different layers may perform different transformations on their inputs. Signals travel from the first layer (the input layer), to the last layer (the output layer), possibly after traversing the layers multiple times.", "xrefs" : [ "https://en.wikipedia.org/wiki/Artificial_neural_network" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "ANN" }, { "pred" : "hasExactSynonym", "val" : "NN" } ] } }, { "id" : "https://w3id.org/aio/Association_Rule_Learning", "lbl" : "Association Rule Learning", "type" : "CLASS", "meta" : { "definition" : { "val" : "A rule-based machine learning method for discovering interesting relations between variables in large databases. It is intended to identify strong rules discovered in databases using some measures of interestingness.", "xrefs" : [ "https://en.wikipedia.org/wiki/Association_rule_learning" ] } } }, { "id" : "https://w3id.org/aio/Attention_Layer", "lbl" : "Attention Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Dot-product attention layer, a.k.a. Luong-style attention.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/Attention" ] } } }, { "id" : "https://w3id.org/aio/Auto_Encoder_Network", "lbl" : "Auto Encoder Network", "type" : "CLASS", "meta" : { "definition" : { "val" : "An autoencoder is a type of artificial neural network used to learn efficient codings of unlabeled data (unsupervised Learning). The encoding is validated and refined by attempting to regenerate the input from the encoding. The autoencoder learns a representation (encoding) for a set of data, typically for dimensionality reduction, by training the network to ignore insignificant data (“noise”). 
(https://en.wikipedia.org/wiki/Autoencoder)", "xrefs" : [ "https://en.wikipedia.org/wiki/Autoencoder" ] }, "comments" : [ "Input, Hidden, Matched Output-Input" ], "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "AE" } ] } }, { "id" : "https://w3id.org/aio/Automation_Complacency_Bias", "lbl" : "Automation Complacency Bias", "type" : "CLASS", "meta" : { "definition" : { "val" : "When humans over-rely on automated systems or have their skills attenuated by such over-reliance (e.g., spelling and autocorrect or spellcheckers).", "xrefs" : [ "https://doi.org/10.6028/NIST.SP.1270" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "Automation Complacency" } ] } }, { "id" : "https://w3id.org/aio/Availability_Heuristic_Bias", "lbl" : "Availability Heuristic Bias", "type" : "CLASS", "meta" : { "definition" : { "val" : "A mental shortcut whereby people tend to overweight what comes easily or quickly to mind, meaning that what is easier to recall—e.g., more “available”—receives greater emphasis in judgement and decision-making.", "xrefs" : [ "https://doi.org/10.6028/NIST.SP.1270" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "Availability Bias" }, { "pred" : "hasExactSynonym", "val" : "Availability Heuristic" } ] } }, { "id" : "https://w3id.org/aio/AveragePooling1D_Layer", "lbl" : "AveragePooling1D Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Average pooling for temporal data. Downsamples the input representation by taking the average value over the window defined by pool_size. The window is shifted by strides. The resulting output when using \"valid\" padding option has a shape of: output_shape = (input_shape - pool_size + 1) / strides. The resulting output shape when using the \"same\" padding option is: output_shape = input_shape / strides.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/AveragePooling1D" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "AvgPool1D" }, { "pred" : "hasExactSynonym", "val" : "AvgPool1d" } ] } }, { "id" : "https://w3id.org/aio/AveragePooling2D_Layer", "lbl" : "AveragePooling2D Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Average pooling operation for spatial data. Downsamples the input along its spatial dimensions (height and width) by taking the average value over an input window (of size defined by pool_size) for each channel of the input. The window is shifted by strides along each dimension. The resulting output when using \"valid\" padding option has a shape (number of rows or columns) of: output_shape = math.floor((input_shape - pool_size) / strides) + 1 (when input_shape >= pool_size). The resulting output shape when using the \"same\" padding option is: output_shape = math.floor((input_shape - 1) / strides) + 1.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/AveragePooling2D" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "AvgPool2D" }, { "pred" : "hasExactSynonym", "val" : "AvgPool2d" } ] } }, { "id" : "https://w3id.org/aio/AveragePooling3D_Layer", "lbl" : "AveragePooling3D Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Average pooling operation for 3D data (spatial or spatio-temporal). Downsamples the input along its spatial dimensions (depth, height, and width) by taking the average value over an input window (of size defined by pool_size) for each channel of the input. 
The window is shifted by strides along each dimension.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/AveragePooling3D" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "AvgPool3D" }, { "pred" : "hasExactSynonym", "val" : "AvgPool3d" } ] } }, { "id" : "https://w3id.org/aio/Average_Layer", "lbl" : "Average Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Layer that averages a list of inputs element-wise. It takes as input a list of tensors, all of the same shape, and returns a single tensor (also of the same shape).", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/Average" ] } } }, { "id" : "https://w3id.org/aio/AvgPool1D_Layer", "lbl" : "AvgPool1D Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Applies a 1D average pooling over an input signal composed of several input planes.", "xrefs" : [ "https://pytorch.org/docs/stable/nn.html#pooling-layers" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "AvgPool1D" }, { "pred" : "hasExactSynonym", "val" : "AvgPool1d" } ] } }, { "id" : "https://w3id.org/aio/AvgPool2D_Layer", "lbl" : "AvgPool2D Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Applies a 2D average pooling over an input signal composed of several input planes.", "xrefs" : [ "https://pytorch.org/docs/stable/nn.html#pooling-layers" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "AvgPool2D" }, { "pred" : "hasExactSynonym", "val" : "AvgPool2d" } ] } }, { "id" : "https://w3id.org/aio/AvgPool3D_Layer", "lbl" : "AvgPool3D Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Applies a 3D average pooling over an input signal composed of several input planes.", "xrefs" : [ "https://pytorch.org/docs/stable/nn.html#pooling-layers" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "AvgPool3D" }, { "pred" : "hasExactSynonym", "val" : "AvgPool3d" } ] } }, { "id" : "https://w3id.org/aio/BatchNorm1D_Layer", "lbl" : "BatchNorm1D Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Applies Batch Normalization over a 2D or 3D input as described in the paper Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift .", "xrefs" : [ "https://pytorch.org/docs/stable/nn.html#normalization-layers" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "BatchNorm1D" }, { "pred" : "hasExactSynonym", "val" : "BatchNorm1d" }, { "pred" : "hasExactSynonym", "val" : "nn.BatchNorm1d" } ] } }, { "id" : "https://w3id.org/aio/BatchNorm2D_Layer", "lbl" : "BatchNorm2D Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Applies Batch Normalization over a 4D input (a mini-batch of 2D inputs with additional channel dimension) as described in the paper Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift .", "xrefs" : [ "https://pytorch.org/docs/stable/nn.html#normalization-layers" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "BatchNorm2D" }, { "pred" : "hasExactSynonym", "val" : "BatchNorm2d" }, { "pred" : "hasExactSynonym", "val" : "nn.BatchNorm2d" } ] } }, { "id" : "https://w3id.org/aio/BatchNorm3D_Layer", "lbl" : "BatchNorm3D Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Applies Batch Normalization over a 5D input (a mini-batch of 3D inputs with additional channel dimension) as described in the paper Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift .", "xrefs" : [ 
"https://pytorch.org/docs/stable/nn.html#normalization-layers" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "BatchNorm3D" }, { "pred" : "hasExactSynonym", "val" : "BatchNorm3d" }, { "pred" : "hasExactSynonym", "val" : "nn.BatchNorm3d" } ] } }, { "id" : "https://w3id.org/aio/BatchNormalization_Layer", "lbl" : "BatchNormalization Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Layer that normalizes its inputs. Batch normalization applies a transformation that maintains the mean output close to 0 and the output standard deviation close to 1. Importantly, batch normalization works differently during training and during inference. During training (i.e. when using fit() or when calling the layer/model with the argument training=True), the layer normalizes its output using the mean and standard deviation of the current batch of inputs. That is to say, for each channel being normalized, the layer returns gamma * (batch - mean(batch)) / sqrt(var(batch) + epsilon) + beta, where: epsilon is small constant (configurable as part of the constructor arguments), gamma is a learned scaling factor (initialized as 1), which can be disabled by passing scale=False to the constructor. beta is a learned offset factor (initialized as 0), which can be disabled by passing center=False to the constructor. During inference (i.e. when using evaluate() or predict() or when calling the layer/model with the argument training=False (which is the default), the layer normalizes its output using a moving average of the mean and standard deviation of the batches it has seen during training. That is to say, it returns gamma * (batch - self.moving_mean) / sqrt(self.moving_var + epsilon) + beta. self.moving_mean and self.moving_var are non-trainable variables that are updated each time the layer in called in training mode, as such: moving_mean = moving_mean * momentum + mean(batch) * (1 - momentum) moving_var = moving_var * momentum + var(batch) * (1 - momentum).", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/BatchNormalization" ] } } }, { "id" : "https://w3id.org/aio/Bayesian_Network", "lbl" : "Bayesian Network", "type" : "CLASS", "meta" : { "definition" : { "val" : "A probabilistic graphical model that represents a set of variables and their conditional dependencies via a directed acyclic graph (DAG).", "xrefs" : [ "https://en.wikipedia.org/wiki/Bayesian_network" ] } } }, { "id" : "https://w3id.org/aio/Behavioral_Bias", "lbl" : "Behavioral Bias", "type" : "CLASS", "meta" : { "definition" : { "val" : "Systematic distortions in user behavior across platforms or contexts, or across users represented in different datasets.", "xrefs" : [ "https://doi.org/10.6028/NIST.SP.1270" ] } } }, { "id" : "https://w3id.org/aio/Bias", "lbl" : "Bias", "type" : "CLASS", "meta" : { "definition" : { "val" : "Systematic error introduced into sampling or testing by selecting or encouraging one outcome or answer over others.", "xrefs" : [ "https://www.merriam-webster.com/dictionary/bias" ] } } }, { "id" : "https://w3id.org/aio/Biclustering", "lbl" : "Biclustering", "type" : "CLASS", "meta" : { "definition" : { "val" : "Methods that simultaneously cluster the rows and columns of a matrix.", "xrefs" : [ "https://en.wikipedia.org/wiki/Biclustering" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "Block Clustering" }, { "pred" : "hasExactSynonym", "val" : "Co-clustering" }, { "pred" : "hasExactSynonym", "val" : "Joint Clustering" }, { "pred" : "hasExactSynonym", "val" : "Two-mode Clustering" 
}, { "pred" : "hasExactSynonym", "val" : "Two-way Clustering" } ] } }, { "id" : "https://w3id.org/aio/Bidirectional_Layer", "lbl" : "Bidirectional Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Bidirectional wrapper for RNNs.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/Bidirectional" ] } } }, { "id" : "https://w3id.org/aio/Binary_Classification", "lbl" : "Binary Classification", "type" : "CLASS", "meta" : { "definition" : { "val" : "Methods that classify the elements of a set into two groups (each called class) on the basis of a classification rule.", "xrefs" : [ "https://en.wikipedia.org/wiki/Binary_classification" ] } } }, { "id" : "https://w3id.org/aio/Boltzmann_Machine_Network", "lbl" : "Boltzmann Machine Network", "type" : "CLASS", "meta" : { "definition" : { "val" : "A Boltzmann machine is a type of stochastic recurrent neural network. It is a Markov random field. It was translated from statistical physics for use in cognitive science. The Boltzmann machine is based on a stochastic spin-glass model with an external field, i.e., a Sherrington–Kirkpatrick model that is a stochastic Ising Model[2] and applied to machine Learning.", "xrefs" : [ "https://en.wikipedia.org/wiki/Boltzmann_machine" ] }, "comments" : [ "Backfed Input, Probabilistic Hidden" ], "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "BM" }, { "pred" : "hasExactSynonym", "val" : "Sherrington–Kirkpatrick model with external field" }, { "pred" : "hasExactSynonym", "val" : "stochastic Hopfield network with hidden units" }, { "pred" : "hasExactSynonym", "val" : "stochastic Ising-Lenz-Little model" } ] } }, { "id" : "https://w3id.org/aio/Categorical_Features_Preprocessing_Layer", "lbl" : "Categorical Features Preprocessing Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "A layer that performs categorical data preprocessing operations.", "xrefs" : [ "https://keras.io/guides/preprocessing_layers/" ] } } }, { "id" : "https://w3id.org/aio/CategoryEncoding_Layer", "lbl" : "CategoryEncoding Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "A preprocessing layer which encodes integer features. This layer provides options for condensing data into a categorical encoding when the total number of tokens are known in advance. It accepts integer values as inputs, and it outputs a dense or sparse representation of those inputs. For integer inputs where the total number of tokens is not known, use tf.keras.layers.IntegerLookup instead.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/CategoryEncoding" ] } } }, { "id" : "https://w3id.org/aio/Causal_Graphical_Model", "lbl" : "Causal Graphical Model", "type" : "CLASS", "meta" : { "definition" : { "val" : "Probabilistic graphical models used to encode assumptions about the data-generating process.", "xrefs" : [ "https://en.wikipedia.org/wiki/Causal_graph" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "Casaul Bayesian Network" }, { "pred" : "hasExactSynonym", "val" : "Casaul Graph" }, { "pred" : "hasExactSynonym", "val" : "DAG" }, { "pred" : "hasExactSynonym", "val" : "Directed Acyclic Graph" }, { "pred" : "hasExactSynonym", "val" : "Path Diagram" } ] } }, { "id" : "https://w3id.org/aio/CenterCrop_Layer", "lbl" : "CenterCrop Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "A preprocessing layer which crops images. This layers crops the central portion of the images to a target size. 
If an image is smaller than the target size, it will be resized and cropped so as to return the largest possible window in the image that matches the target aspect ratio. Input pixel values can be of any range (e.g. [0., 1.) or [0, 255]) and of integer or floating point dtype. By default, the layer will output floats.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/CenterCrop" ] } } }, { "id" : "https://w3id.org/aio/Classification", "lbl" : "Classification", "type" : "CLASS", "meta" : { "definition" : { "val" : "Methods that distinguish and distribute kinds of \"things\" into different groups.", "xrefs" : [ "https://en.wikipedia.org/wiki/Classification_(general_theory)" ] } } }, { "id" : "https://w3id.org/aio/Clustering", "lbl" : "Clustering", "type" : "CLASS", "meta" : { "definition" : { "val" : "Methods that group a set of objects in such a way that objects in the same group (called a cluster) are more similar (in some sense) to each other than to those in other groups (clusters).", "xrefs" : [ "https://en.wikipedia.org/wiki/Cluster_analysis" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "Cluster analysis" } ] } }, { "id" : "https://w3id.org/aio/Cognitive_Bias", "lbl" : "Cognitive Bias", "type" : "CLASS", "meta" : { "definition" : { "val" : "A broad term referring generally to a systematic pattern of deviation from rational judgement and decision-making. A large variety of cognitive biases have been identified over many decades of research in judgement and decision-making, some of which are adaptive mental shortcuts known as heuristics.", "xrefs" : [ "https://doi.org/10.6028/NIST.SP.1270" ] } } }, { "id" : "https://w3id.org/aio/Computational_Bias", "lbl" : "Computational Bias", "type" : "CLASS", "meta" : { "definition" : { "val" : "A systematic tendency which causes differences between results and facts. The bias exists in numbers of the process of data analysis, including the source of the data, the estimator chosen, and the ways the data was analyzed.", "xrefs" : [ "https://en.wikipedia.org/wiki/Bias_(statistics)" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "Statistical Bias" } ] } }, { "id" : "https://w3id.org/aio/Concatenate_Layer", "lbl" : "Concatenate Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Layer that concatenates a list of inputs. It takes as input a list of tensors, all of the same shape except for the concatenation axis, and returns a single tensor that is the concatenation of all inputs.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/Concatenate" ] } } }, { "id" : "https://w3id.org/aio/Concept_Drift_Bias", "lbl" : "Concept Drift Bias", "type" : "CLASS", "meta" : { "definition" : { "val" : "Use of a system outside the planned domain of application, and a common cause of performance gaps between laboratory settings and the real world.", "xrefs" : [ "https://doi.org/10.6028/NIST.SP.1270" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "Concept Drift" } ] } }, { "id" : "https://w3id.org/aio/Confirmation_Bias", "lbl" : "Confirmation Bias", "type" : "CLASS", "meta" : { "definition" : { "val" : "A cognitive bias where people tend to prefer information that aligns with, or confirms, their existing beliefs. People can exhibit confirmation bias in the search for, interpretation of, and recall of information. In the famous Wason selection task experiments, participants repeatedly showed a preference for confirmation over falsification. 
They were tasked with identifying an underlying rule that applied to number triples they were shown, and they overwhelmingly tested triples that confirmed rather than falsified their hypothesized rule.", "xrefs" : [ "https://doi.org/10.6028/NIST.SP.1270" ] } } }, { "id" : "https://w3id.org/aio/Consumer_Bias", "lbl" : "Consumer Bias", "type" : "CLASS", "meta" : { "definition" : { "val" : "Arises when an algorithm or platform provides users with a new venue within which to express their biases, and may occur from either side, or party, in a digital interaction..", "xrefs" : [ "https://doi.org/10.6028/NIST.SP.1270" ] } } }, { "id" : "https://w3id.org/aio/Content_Production_Bias", "lbl" : "Content Production Bias", "type" : "CLASS", "meta" : { "definition" : { "val" : "Arises from structural, lexical, semantic, and syntactic differences in the contents generated by users.", "xrefs" : [ "https://doi.org/10.6028/NIST.SP.1270" ] } } }, { "id" : "https://w3id.org/aio/Continual_Learning", "lbl" : "Continual Learning", "type" : "CLASS", "meta" : { "definition" : { "val" : "A concept to learn a model for a large number of tasks sequentially without forgetting knowledge obtained from the preceding tasks, where the data in the old tasks are not available any more during training new ones.", "xrefs" : [ "https://paperswithcode.com/task/continual-learning" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "Incremental Learning" }, { "pred" : "hasExactSynonym", "val" : "Life-Long Learning" } ] } }, { "id" : "https://w3id.org/aio/Contrastive_Learning", "lbl" : "Contrastive Learning", "type" : "CLASS", "meta" : { "definition" : { "val" : "Learning that encourages augmentations (views) of the same input to have more similar representations compared to augmentations of different inputs.", "xrefs" : [ "https://arxiv.org/abs/2202.14037" ] } } }, { "id" : "https://w3id.org/aio/ConvLSTM1D_Layer", "lbl" : "ConvLSTM1D Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "1D Convolutional LSTM. Similar to an LSTM layer, but the input transformations and recurrent transformations are both convolutional.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/ConvLSTM1D" ] } } }, { "id" : "https://w3id.org/aio/ConvLSTM2D_Layer", "lbl" : "ConvLSTM2D Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "2D Convolutional LSTM. Similar to an LSTM layer, but the input transformations and recurrent transformations are both convolutional.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/ConvLSTM2D" ] } } }, { "id" : "https://w3id.org/aio/ConvLSTM3D_Layer", "lbl" : "ConvLSTM3D Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "3D Convolutional LSTM. Similar to an LSTM layer, but the input transformations and recurrent transformations are both convolutional.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/ConvLSTM3D" ] } } }, { "id" : "https://w3id.org/aio/Convolution1DTranspose_Layer", "lbl" : "Convolution1DTranspose Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Transposed convolution layer (sometimes called Deconvolution). The need for transposed convolutions generally arises from the desire to use a transformation going in the opposite direction of a normal convolution, i.e., from something that has the shape of the output of some convolution to something that has the shape of its input while maintaining a connectivity pattern that is compatible with said convolution. 
When using this layer as the first layer in a model, provide the keyword argument input_shape (tuple of integers or None, does not include the sample axis), e.g. input_shape=(128, 3) for data with 128 time steps and 3 channels.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/Conv1DTranspose" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "Conv1DTranspose Layer" }, { "pred" : "hasExactSynonym", "val" : "ConvTranspose1d" }, { "pred" : "hasExactSynonym", "val" : "Convolution1DTranspose" }, { "pred" : "hasExactSynonym", "val" : "Convolution1dTranspose" }, { "pred" : "hasExactSynonym", "val" : "nn.ConvTranspose1d" } ] } }, { "id" : "https://w3id.org/aio/Convolution1D_Layer", "lbl" : "Convolution1D Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "1D convolution layer (e.g. temporal convolution).", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/Conv1D" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "Conv1D Layer" }, { "pred" : "hasExactSynonym", "val" : "Conv1d" }, { "pred" : "hasExactSynonym", "val" : "Convolution1D" }, { "pred" : "hasExactSynonym", "val" : "Convolution1d" }, { "pred" : "hasExactSynonym", "val" : "nn.Conv1d" } ] } }, { "id" : "https://w3id.org/aio/Convolution2DTranspose_Layer", "lbl" : "Convolution2DTranspose Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Transposed convolution layer (sometimes called Deconvolution).", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/Conv2DTranspose" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "Conv2DTranspose Layer" }, { "pred" : "hasExactSynonym", "val" : "ConvTranspose2d" }, { "pred" : "hasExactSynonym", "val" : "Convolution2DTranspose" }, { "pred" : "hasExactSynonym", "val" : "Convolution2dTranspose" }, { "pred" : "hasExactSynonym", "val" : "nn.ConvTranspose2d" } ] } }, { "id" : "https://w3id.org/aio/Convolution2D_Layer", "lbl" : "Convolution2D Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "2D convolution layer (e.g. spatial convolution over images). This layer creates a convolution kernel that is convolved with the layer input to produce a tensor of outputs. If use_bias is True, a bias vector is created and added to the outputs. Finally, if activation is not None, it is applied to the outputs as well. When using this layer as the first layer in a model, provide the keyword argument input_shape (tuple of integers or None, does not include the sample axis), e.g. input_shape=(128, 128, 3) for 128x128 RGB pictures in data_format=\"channels_last\". You can use None when a dimension has variable size.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/Conv2D" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "Conv2D Layer" }, { "pred" : "hasExactSynonym", "val" : "Conv2d" }, { "pred" : "hasExactSynonym", "val" : "Convolution2D" }, { "pred" : "hasExactSynonym", "val" : "Convolution2d" }, { "pred" : "hasExactSynonym", "val" : "nn.Conv2d" } ] } }, { "id" : "https://w3id.org/aio/Convolution3DTranspose_Layer", "lbl" : "Convolution3DTranspose Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Transposed convolution layer (sometimes called Deconvolution). 
The need for transposed convolutions generally arises from the desire to use a transformation going in the opposite direction of a normal convolution, i.e., from something that has the shape of the output of some convolution to something that has the shape of its input while maintaining a connectivity pattern that is compatible with said convolution. When using this layer as the first layer in a model, provide the keyword argument input_shape (tuple of integers or None, does not include the sample axis), e.g. input_shape=(128, 128, 128, 3) for a 128x128x128 volume with 3 channels if data_format=\"channels_last\".", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/Conv3DTranspose" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "Conv3DTranspose Layer" }, { "pred" : "hasExactSynonym", "val" : "ConvTranspose3d" }, { "pred" : "hasExactSynonym", "val" : "Convolution3DTranspose" }, { "pred" : "hasExactSynonym", "val" : "Convolution3dTranspose" }, { "pred" : "hasExactSynonym", "val" : "nn.ConvTranspose3d" } ] } }, { "id" : "https://w3id.org/aio/Convolution3D_Layer", "lbl" : "Convolution3D Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "3D convolution layer (e.g. spatial convolution over volumes). This layer creates a convolution kernel that is convolved with the layer input to produce a tensor of outputs. If use_bias is True, a bias vector is created and added to the outputs. Finally, if activation is not None, it is applied to the outputs as well. When using this layer as the first layer in a model, provide the keyword argument input_shape (tuple of integers or None, does not include the sample axis), e.g. input_shape=(128, 128, 128, 1) for 128x128x128 volumes with a single channel, in data_format=\"channels_last\".", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/Conv3D" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "Conv3D Layer" }, { "pred" : "hasExactSynonym", "val" : "Conv3d" }, { "pred" : "hasExactSynonym", "val" : "Convolution3D" }, { "pred" : "hasExactSynonym", "val" : "Convolution3d" }, { "pred" : "hasExactSynonym", "val" : "nn.Conv3d" } ] } }, { "id" : "https://w3id.org/aio/Convolutional_Layer", "lbl" : "Convolutional Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "A convolutional layer is the main building block of a CNN. It contains a set of filters (or kernels), parameters of which are to be learned throughout the training. The size of the filters is usually smaller than the actual image. Each filter convolves with the image and creates an activation map.", "xrefs" : [ "https://www.sciencedirect.com/topics/engineering/convolutional-layer#:~:text=A%20convolutional%20layer%20is%20the,and%20creates%20an%20activation%20map." ] } } }, { "id" : "https://w3id.org/aio/Cropping1D_Layer", "lbl" : "Cropping1D Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Cropping layer for 1D input (e.g. temporal sequence). It crops along the time dimension (axis 1).", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/Cropping1D" ] } } }, { "id" : "https://w3id.org/aio/Cropping2D_Layer", "lbl" : "Cropping2D Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Cropping layer for 2D input (e.g. picture). It crops along spatial dimensions, i.e. 
height and width.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/Cropping2D" ] } } }, { "id" : "https://w3id.org/aio/Cropping3D_Layer", "lbl" : "Cropping3D Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Cropping layer for 3D data (e.g. spatial or spatio-temporal).", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/Cropping3D" ] } } }, { "id" : "https://w3id.org/aio/Data_Dredging_Bias", "lbl" : "Data Dredging Bias", "type" : "CLASS", "meta" : { "definition" : { "val" : "A statistical bias in which testing huge numbers of hypotheses of a dataset may appear to yield statistical significance even when the results are statistically nonsignificant.", "xrefs" : [ "https://doi.org/10.6028/NIST.SP.1270" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "Data Dredging" } ] } }, { "id" : "https://w3id.org/aio/Data_Generation_Bias", "lbl" : "Data Generation Bias", "type" : "CLASS", "meta" : { "definition" : { "val" : "Arises from the addition of synthetic or redundant data samples to a dataset.", "xrefs" : [ "https://doi.org/10.6028/NIST.SP.1270" ] } } }, { "id" : "https://w3id.org/aio/Data_Imputation", "lbl" : "Data Imputation", "type" : "CLASS", "meta" : { "definition" : { "val" : "Methods that replace missing data with substituted values.", "xrefs" : [ "https://en.wikipedia.org/wiki/Imputation_(statistics)" ] } } }, { "id" : "https://w3id.org/aio/Decision_Tree", "lbl" : "Decision Tree", "type" : "CLASS", "meta" : { "definition" : { "val" : "A decision support tool that uses a tree-like model of decisions and their possible consequences, including chance event outcomes, resource costs, and utility.", "xrefs" : [ "https://en.wikipedia.org/wiki/Decision_tree" ] } } }, { "id" : "https://w3id.org/aio/Decoder_LLM", "lbl" : "Decoder LLM", "type" : "CLASS", "meta" : { "definition" : { "val" : "In the decoder-only architecture, the model consists of only a decoder, which is trained to predict the next token in a sequence given the previous tokens. The critical difference between the Decoder-only architecture and the Encoder-Decoder architecture is that the Decoder-only architecture does not have an explicit encoder to summarize the input information. Instead, the information is encoded implicitly in the hidden state of the decoder, which is updated at each step of the generation process.", "xrefs" : [ "https://www.practicalai.io/understanding-transformer-model-architectures/#:~:text=Encoder%2Donly&text=These%20models%20have%20a%20pre,Named%20entity%20recognition" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "LLM" } ] } }, { "id" : "https://w3id.org/aio/Deconvolutional_Network", "lbl" : "Deconvolutional Network", "type" : "CLASS", "meta" : { "definition" : { "val" : "Deconvolutional Networks, a framework that permits the unsupervised construction of hierarchical image representations. These representations can be used for both low-level tasks such as denoising, as well as providing features for object recognition. Each level of the hierarchy groups information from the level beneath to form more complex features that exist over a larger scale in the image. 
(https://ieeexplore.ieee.org/document/5539957)", "xrefs" : [ "https://ieeexplore.ieee.org/document/5539957" ] }, "comments" : [ "Input, Kernel, Convolutional/Pool, Output" ], "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "DN" } ] } }, { "id" : "https://w3id.org/aio/Deep_Active_Learning", "lbl" : "Deep Active Learning", "type" : "CLASS", "meta" : { "definition" : { "val" : "The combination of deep learning and active learning, where active learning attempts to maximize a model’s performance gain while annotating the fewest samples possible.", "xrefs" : [ "https://arxiv.org/pdf/2009.00236.pdf" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "DeepAL" } ] } }, { "id" : "https://w3id.org/aio/Deep_Belief_Network", "lbl" : "Deep Belief Network", "type" : "CLASS", "meta" : { "definition" : { "val" : "In machine Learning, a deep belief network (DBN) is a generative graphical model, or alternatively a class of deep neural network, composed of multiple layers of latent variables (\"hidden units\"), with connections between the layers but not between units within each layer. When trained on a set of examples without supervision, a DBN can learn to probabilistically reconstruct its inputs. The layers then act as feature detectors. After this Learning step, a DBN can be further trained with supervision to perform classification. DBNs can be viewed as a composition of simple, unsupervised networks such as restricted Boltzmann machines (RBMs) or autoencoders, where each sub-network's hidden layer serves as the visible layer for the next. An RBM is an undirected, generative energy-based model with a \"visible\" input layer and a hidden layer and connections between but not within layers. This composition leads to a fast, layer-by-layer unsupervised training procedure, where contrastive divergence is applied to each sub-network in turn, starting from the \"lowest\" pair of layers (the lowest visible layer is a training set). The observation that DBNs can be trained greedily, one layer at a time, led to one of the first effective deep Learning algorithms. (https://en.wikipedia.org/wiki/Deep_belief_network)", "xrefs" : [ "https://en.wikipedia.org/wiki/Deep_belief_network" ] }, "comments" : [ "Backfed Input, Probabilistic Hidden, Hidden, Matched Output-Input" ], "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "DBN" } ] } }, { "id" : "https://w3id.org/aio/Deep_Convolutional_Inverse_Graphics_Network", "lbl" : "Deep Convolutional Inverse Graphics Network", "type" : "CLASS", "meta" : { "definition" : { "val" : "A Deep Convolution Inverse Graphics Network (DC-IGN) is a model that learns an interpretable representation of images. This representation is disentangled with respect to transformations such as out-of-plane rotations and lighting variations. The DC-IGN model is composed of multiple layers of convolution and de-convolution operators and is trained using the Stochastic Gradient Variational Bayes (SGVB) algorithm. (https://arxiv.org/abs/1503.03167)" }, "comments" : [ "Input, Kernel, Convolutional/Pool, Probabilistic Hidden, Convolutional/Pool, Kernel, Output" ], "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "DCIGN" } ] } }, { "id" : "https://w3id.org/aio/Deep_Convolutional_Network", "lbl" : "Deep Convolutional Network", "type" : "CLASS", "meta" : { "definition" : { "val" : "A convolutional neural network (CNN, or ConvNet) is a class of artificial neural network, most commonly applied to analyze visual imagery. 
They are also known as shift invariant or space invariant artificial neural networks (SIANN), based on the shared-weight architecture of the convolution kernels or filters that slide along input features and provide translation equivariant responses known as feature maps. CNNs are regularized versions of multilayer perceptrons. (https://en.wikipedia.org/wiki/Convolutional_neural_network)", "xrefs" : [ "https://en.wikipedia.org/wiki/Convolutional_neural_network" ] }, "comments" : [ "Input, Kernel, Convolutional/Pool, Hidden, Output" ], "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "CNN" }, { "pred" : "hasExactSynonym", "val" : "ConvNet" }, { "pred" : "hasExactSynonym", "val" : "Convolutional Neural Network" }, { "pred" : "hasExactSynonym", "val" : "DCN" } ] } }, { "id" : "https://w3id.org/aio/Deep_FeedFoward", "lbl" : "Deep FeedForward", "type" : "CLASS", "meta" : { "definition" : { "val" : "The feedforward neural network was the first and simplest type of artificial neural network devised. In this network, the information moves in only one direction—forward—from the input nodes, through the hidden nodes (if any) and to the output nodes. There are no cycles or loops in the network.", "xrefs" : [ "https://en.wikipedia.org/wiki/Feedforward_neural_network" ] }, "comments" : [ "Input, Hidden, Output" ], "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "DFF" }, { "pred" : "hasExactSynonym", "val" : "FFN" }, { "pred" : "hasExactSynonym", "val" : "Feedforward Network" }, { "pred" : "hasExactSynonym", "val" : "MLP" }, { "pred" : "hasExactSynonym", "val" : "Multilayer Perceptron" } ] } }, { "id" : "https://w3id.org/aio/Deep_Neural_Network", "lbl" : "Deep Neural Network", "type" : "CLASS", "meta" : { "definition" : { "val" : "A deep neural network (DNN) is an artificial neural network (ANN) with multiple layers between the input and output layers. There are different types of neural networks but they always consist of the same components: neurons, synapses, weights, biases, and functions. (https://en.wikipedia.org/wiki/Deep_Learning#:~:text=A%20deep%20neural%20network%20(DNN,weights%2C%20biases%2C%20and%20functions.)" }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "DNN" } ] } }, { "id" : "https://w3id.org/aio/Deep_Transfer_Learning", "lbl" : "Deep Transfer Learning", "type" : "CLASS", "meta" : { "definition" : { "val" : "Deep transfer learning methods relax the hypothesis that the training data must be independent and identically distributed (i.i.d.) with the test data, which motivates us to use transfer learning to solve the problem of insufficient training data.", "xrefs" : [ "https://arxiv.org/abs/1808.01974" ] } } }, { "id" : "https://w3id.org/aio/Denoising_Auto_Encoder", "lbl" : "Denoising Auto Encoder", "type" : "CLASS", "meta" : { "definition" : { "val" : "Denoising Auto Encoders (DAEs) take a partially corrupted input and are trained to recover the original undistorted input. In practice, the objective of denoising autoencoders is that of cleaning the corrupted input, or denoising. (https://en.wikipedia.org/wiki/Autoencoder)" }, "comments" : [ "Noisy Input, Hidden, Matched Output-Input" ], "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "DAE" } ] } }, { "id" : "https://w3id.org/aio/DenseFeatures_Layer", "lbl" : "DenseFeatures Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "A layer that produces a dense Tensor based on given feature_columns. Generally a single example in training data is described with FeatureColumns. 
At the first layer of the model, this column oriented data should be converted to a single Tensor. This layer can be called multiple times with different features. This is the V2 version of this layer that uses name_scopes to create variables instead of variable_scopes. But this approach currently lacks support for partitioned variables. In that case, use the V1 version instead.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/DenseFeatures" ] } } }, { "id" : "https://w3id.org/aio/Dense_Layer", "lbl" : "Dense Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Just your regular densely-connected NN layer.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/Dense" ] } } }, { "id" : "https://w3id.org/aio/Deployment_Bias", "lbl" : "Deployment Bias", "type" : "CLASS", "meta" : { "definition" : { "val" : "Arises when systems are used as decision aids for humans, since the human intermediary may act on predictions in ways that are typically not modeled in the system. However, it is still individuals using the deployed system.", "xrefs" : [ "https://doi.org/10.6028/NIST.SP.1270" ] } } }, { "id" : "https://w3id.org/aio/DepthwiseConv1D_Layer", "lbl" : "DepthwiseConv1D Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Depthwise 1D convolution. Depthwise convolution is a type of convolution in which each input channel is convolved with a different kernel (called a depthwise kernel). You can understand depthwise convolution as the first step in a depthwise separable convolution. It is implemented via the following steps: Split the input into individual channels. Convolve each channel with an individual depthwise kernel with depth_multiplier output channels. Concatenate the convolved outputs along the channels axis. Unlike a regular 1D convolution, depthwise convolution does not mix information across different input channels. The depth_multiplier argument determines how many filter are applied to one input channel. 
As such, it controls the amount of output channels that are generated per input channel in the depthwise step.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/DepthwiseConv1D" ] } } }, { "id" : "https://w3id.org/aio/DepthwiseConv2D_Layer", "lbl" : "DepthwiseConv2D Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Depthwise 2D convolution.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/DepthwiseConv2D" ] } } }, { "id" : "https://w3id.org/aio/Detection_Bias", "lbl" : "Detection Bias", "type" : "CLASS", "meta" : { "definition" : { "val" : "Systematic differences between groups in how outcomes are determined and may cause an over- or underestimation of the size of the effect.", "xrefs" : [ "https://doi.org/10.6028/NIST.SP.1270" ] } } }, { "id" : "https://w3id.org/aio/Dimensionality_Reduction", "lbl" : "Dimensionality Reduction", "type" : "CLASS", "meta" : { "definition" : { "val" : "The transformation of data from a high-dimensional space into a low-dimensional space so that the low-dimensional representation retains some meaningful properties of the original data, ideally close to its intrinsic dimension.", "xrefs" : [ "https://en.wikipedia.org/wiki/Dimensionality_reduction" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "Dimension Reduction" } ] } }, { "id" : "https://w3id.org/aio/Discretization_Layer", "lbl" : "Discretization Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "A preprocessing layer which buckets continuous features by ranges.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/Discretization" ] } } }, { "id" : "https://w3id.org/aio/Dot_Layer", "lbl" : "Dot Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Layer that computes a dot product between samples in two tensors. E.g. if applied to a list of two tensors a and b of shape (batch_size, n), the output will be a tensor of shape (batch_size, 1) where each entry i will be the dot product between a[i] and b[i].", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/Dot" ] } } }, { "id" : "https://w3id.org/aio/Dropout_Layer", "lbl" : "Dropout Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Applies Dropout to the input. The Dropout layer randomly sets input units to 0 with a frequency of rate at each step during training time, which helps prevent overfitting. Inputs not set to 0 are scaled up by 1/(1 - rate) such that the sum over all inputs is unchanged. Note that the Dropout layer only applies when training is set to True such that no values are dropped during inference. When using model.fit, training will be appropriately set to True automatically, and in other contexts, you can set the kwarg explicitly to True when calling the layer. (This is in contrast to setting trainable=False for a Dropout layer. trainable does not affect the layer's behavior, as Dropout does not have any variables/weights that can be frozen during training.)", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/Dropout" ] } } }, { "id" : "https://w3id.org/aio/Dunning-Kruger_Effect_Bias", "lbl" : "Dunning-Kruger Effect Bias", "type" : "CLASS", "meta" : { "definition" : { "val" : "The tendency of people with low ability in a given area or task to overestimate their self-assessed ability. 
Typically measured by comparing self-assessment with objective performance, often called subjective ability and objective ability, respectively.", "xrefs" : [ "https://doi.org/10.6028/NIST.SP.1270" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "Dunning-Kruger Effect" } ] } }, { "id" : "https://w3id.org/aio/ELU_Layer", "lbl" : "ELU Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Exponential Linear Unit.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/ELU" ] } } }, { "id" : "https://w3id.org/aio/Echo_State_Network", "lbl" : "Echo State Network", "type" : "CLASS", "meta" : { "definition" : { "val" : "The echo state network (ESN) is a type of reservoir computer that uses a recurrent neural network with a sparsely connected hidden layer (with typically 1% connectivity). The connectivity and weights of hidden neurons are fixed and randomly assigned. The weights of output neurons can be learned so that the network can produce or reproduce specific temporal patterns. The main interest of this network is that although its behaviour is non-linear, the only weights that are modified during training are for the synapses that connect the hidden neurons to output neurons. Thus, the error function is quadratic with respect to the parameter vector and can be differentiated easily to a linear system.", "xrefs" : [ "https://en.wikipedia.org/wiki/Echo_state_network#:~:text=The%20echo%20state%20network%20(ESN,are%20fixed%20and%20randomly%20assigned" ] }, "comments" : [ "Input, Recurrent, Output" ], "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "ESN" } ] } }, { "id" : "https://w3id.org/aio/Ecological_Fallacy_Bias", "lbl" : "Ecological Fallacy Bias", "type" : "CLASS", "meta" : { "definition" : { "val" : "Occurs when an inference is made about an individual based on their membership within a group.", "xrefs" : [ "https://doi.org/10.6028/NIST.SP.1270" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "Ecological Fallacy" } ] } }, { "id" : "https://w3id.org/aio/Embedding_Layer", "lbl" : "Embedding Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Turns positive integers (indexes) into dense vectors of fixed size.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/Embedding" ] } } }, { "id" : "https://w3id.org/aio/Emergent_Bias", "lbl" : "Emergent Bias", "type" : "CLASS", "meta" : { "definition" : { "val" : "Emergent bias is the result of the use and reliance on algorithms across new or unanticipated contexts.", "xrefs" : [ "https://doi.org/10.6028/NIST.SP.1270" ] } } }, { "id" : "https://w3id.org/aio/Encoder-Decoder_LLM", "lbl" : "Encoder-Decoder LLM", "type" : "CLASS", "meta" : { "definition" : { "val" : "The Encoder-Decoder architecture was the original transformer architecture introduced in the Attention Is All You Need (https://arxiv.org/abs/1706.03762) paper. The encoder processes the input sequence and generates a hidden representation that summarizes the input information. The decoder uses this hidden representation to generate the desired output sequence. 
The encoder and decoder are trained end-to-end to maximize the likelihood of the correct output sequence given the input sequence.", "xrefs" : [ "https://www.practicalai.io/understanding-transformer-model-architectures/#:~:text=Encoder%2Donly&text=These%20models%20have%20a%20pre,Named%20entity%20recognition" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "LLM" } ] } }, { "id" : "https://w3id.org/aio/Encoder_LLM", "lbl" : "Encoder LLM", "type" : "CLASS", "meta" : { "definition" : { "val" : "The Encoder-only architecture is used when only encoding the input sequence is required and the decoder is not necessary. The input sequence is encoded into a fixed-length representation and then used as input to a classifier or a regressor to make a prediction. These models have a pre-trained general-purpose encoder but will require fine-tuning of the final classifier or regressor.", "xrefs" : [ "https://www.practicalai.io/understanding-transformer-model-architectures/#:~:text=Encoder%2Donly&text=These%20models%20have%20a%20pre,Named%20entity%20recognition" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "LLM" } ] } }, { "id" : "https://w3id.org/aio/Ensemble_Learning", "lbl" : "Ensemble Learning", "type" : "CLASS", "meta" : { "definition" : { "val" : "Ensemble methods use multiple learning algorithms to obtain better predictive performance than could be obtained from any of the constituent learning algorithms alone.", "xrefs" : [ "https://en.wikipedia.org/wiki/Ensemble_learning" ] } } }, { "id" : "https://w3id.org/aio/Error_Propagation_Bias", "lbl" : "Error Propagation Bias", "type" : "CLASS", "meta" : { "definition" : { "val" : "The effect of variables' uncertainties (or errors, more specifically random errors) on the uncertainty of a function based on them.", "xrefs" : [ "https://doi.org/10.6028/NIST.SP.1270" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "Error Propagation" } ] } }, { "id" : "https://w3id.org/aio/Evaluation_Bias", "lbl" : "Evaluation Bias", "type" : "CLASS", "meta" : { "definition" : { "val" : "Arises when the testing or external benchmark populations do not equally represent the various parts of the user population or from the use of performance metrics that are not appropriate for the way in which the model will be used.", "xrefs" : [ "https://doi.org/10.6028/NIST.SP.1270" ] } } }, { "id" : "https://w3id.org/aio/Exclusion_Bias", "lbl" : "Exclusion Bias", "type" : "CLASS", "meta" : { "definition" : { "val" : "When specific groups of user populations are excluded from testing and subsequent analyses.", "xrefs" : [ "https://doi.org/10.6028/NIST.SP.1270" ] } } }, { "id" : "https://w3id.org/aio/Exponential_Function", "lbl" : "Exponential Function", "type" : "CLASS", "meta" : { "definition" : { "val" : "The exponential function is a mathematical function denoted by f(x)=exp or e^{x}.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/activations/exponential" ] } } }, { "id" : "https://w3id.org/aio/Extreme_Learning_Machine", "lbl" : "Extreme Learning Machine", "type" : "CLASS", "meta" : { "definition" : { "val" : "Extreme Learning machines are feedforward neural networks for classification, regression, clustering, sparse approximation, compression and feature Learning with a single layer or multiple layers of hidden nodes, where the parameters of hidden nodes (not just the weights connecting inputs to hidden nodes) need not be tuned. These hidden nodes can be randomly assigned and never updated (i.e. 
they are random projection but with nonlinear transforms), or can be inherited from their ancestors without being changed. In most cases, the output weights of hidden nodes are usually learned in a single step, which essentially amounts to Learning a linear model. (https://en.wikipedia.org/wiki/Extreme_Learning_machine)", "xrefs" : [ "https://en.wikipedia.org/wiki/Extreme_Learning_machine" ] }, "comments" : [ "Input, Hidden, Output" ], "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "ELM" } ] } }, { "id" : "https://w3id.org/aio/Federated_Learning", "lbl" : "Federated Learning", "type" : "CLASS", "meta" : { "definition" : { "val" : "A technique that trains an algorithm across multiple decentralized edge devices or servers holding local data samples, without exchanging them.", "xrefs" : [ "https://en.wikipedia.org/wiki/Federated_learning" ] } } }, { "id" : "https://w3id.org/aio/Feedback_Loop_Bias", "lbl" : "Feedback Loop Bias", "type" : "CLASS", "meta" : { "definition" : { "val" : "Effects that may occur when an algorithm learns from user behavior and feeds that behavior back into the model.", "xrefs" : [ "https://doi.org/10.6028/NIST.SP.1270" ] } } }, { "id" : "https://w3id.org/aio/Feedback_Network", "lbl" : "Feedback Network", "type" : "CLASS", "meta" : { "definition" : { "val" : "A feedback based approach in which the representation is formed in an iterative manner based on a feedback received from previous iteration's output. (https://arxiv.org/abs/1612.09508)" }, "comments" : [ "Input, Hidden, Output, Hidden" ], "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "FBN" } ] } }, { "id" : "https://w3id.org/aio/Fixed_Effects_Model", "lbl" : "Fixed Effects Model", "type" : "CLASS", "meta" : { "definition" : { "val" : "A statistical model in which the model parameters are fixed or non-random quantities.", "xrefs" : [ "https://en.wikipedia.org/wiki/Fixed_effects_model" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "FEM" } ] } }, { "id" : "https://w3id.org/aio/Flatten_Layer", "lbl" : "Flatten Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Flattens the input. 
Does not affect the batch size.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/Flatten" ] } } }, { "id" : "https://w3id.org/aio/FractionalMaxPool2D_Layer", "lbl" : "FractionalMaxPool2D Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Applies a 2D fractional max pooling over an input signal composed of several input planes.", "xrefs" : [ "https://pytorch.org/docs/stable/nn.html#pooling-layers" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "FractionalMaxPool2D" }, { "pred" : "hasExactSynonym", "val" : "FractionalMaxPool2d" } ] } }, { "id" : "https://w3id.org/aio/FractionalMaxPool3D_Layer", "lbl" : "FractionalMaxPool3D Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Applies a 3D fractional max pooling over an input signal composed of several input planes.", "xrefs" : [ "https://pytorch.org/docs/stable/nn.html#pooling-layers" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "FractionalMaxPool3D" }, { "pred" : "hasExactSynonym", "val" : "FractionalMaxPool3d" } ] } }, { "id" : "https://w3id.org/aio/Funding_Bias", "lbl" : "Funding Bias", "type" : "CLASS", "meta" : { "definition" : { "val" : "Arises when biased results are reported in order to support or satisfy the funding agency or financial supporter of the research study, but it can also be the individual researcher.", "xrefs" : [ "https://doi.org/10.6028/NIST.SP.1270" ] } } }, { "id" : "https://w3id.org/aio/GRUCell_Layer", "lbl" : "GRUCell Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Cell class for the GRU layer. This class processes one step within the whole time sequence input, whereas tf.keras.layer.GRU processes the whole sequence.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/GRUCell" ] } } }, { "id" : "https://w3id.org/aio/GRU_Layer", "lbl" : "GRU Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Gated Recurrent Unit - Cho et al. 2014. Based on available runtime hardware and constraints, this layer will choose different implementations (cuDNN-based or pure-TensorFlow) to maximize the performance. If a GPU is available and all the arguments to the layer meet the requirement of the cuDNN kernel (see below for details), the layer will use a fast cuDNN implementation. The requirements to use the cuDNN implementation are: activation == tanh, recurrent_activation == sigmoid, recurrent_dropout == 0, unroll is False, use_bias is True, reset_after is True. Inputs, if use masking, are strictly right-padded. Eager execution is enabled in the outermost context. There are two variants of the GRU implementation. The default one is based on v3 and has reset gate applied to hidden state before matrix multiplication. The other one is based on original and has the order reversed. The second variant is compatible with CuDNNGRU (GPU-only) and allows inference on CPU. Thus it has separate biases for kernel and recurrent_kernel. To use this variant, set reset_after=True and recurrent_activation='sigmoid'.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/GRU" ] } } }, { "id" : "https://w3id.org/aio/Gated_Recurrent_Unit", "lbl" : "Gated Recurrent Unit", "type" : "CLASS", "meta" : { "definition" : { "val" : "Gated recurrent units (GRUs) are a gating mechanism in recurrent neural networks, introduced in 2014 by Kyunghyun Cho et al. The GRU is like a long short-term memory (LSTM) with a forget gate, but has fewer parameters than LSTM, as it lacks an output gate. 
GRU's performance on certain tasks of polyphonic music modeling, speech signal modeling and natural language processing was found to be similar to that of LSTM.[4][5] GRUs have been shown to exhibit better performance on certain smaller and less frequent datasets.", "xrefs" : [ "https://en.wikipedia.org/wiki/Gated_recurrent_unit" ] }, "comments" : [ "Input, Memory Cell, Output" ], "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "GRU" } ] } }, { "id" : "https://w3id.org/aio/GaussianDropout_Layer", "lbl" : "GaussianDropout Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Apply multiplicative 1-centered Gaussian noise. As it is a regularization layer, it is only active at training time.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/GaussianDropout" ] } } }, { "id" : "https://w3id.org/aio/GaussianNoise_Layer", "lbl" : "GaussianNoise Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Apply additive zero-centered Gaussian noise. This is useful to mitigate overfitting (you could see it as a form of random data augmentation). Gaussian Noise (GS) is a natural choice as corruption process for real valued inputs. As it is a regularization layer, it is only active at training time.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/GaussianNoise" ] } } }, { "id" : "https://w3id.org/aio/GeLu_Function", "lbl" : "GELU Function", "type" : "CLASS", "meta" : { "definition" : { "val" : "Gaussian error linear unit (GELU) computes x * P(X <= x), where P(X) ~ N(0, 1). The (GELU) nonlinearity weights inputs by their value, rather than gates inputs by their sign as in ReLU.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/activations/gelu" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "GELU" }, { "pred" : "hasExactSynonym", "val" : "Gaussian Error Linear Unit" } ] } }, { "id" : "https://w3id.org/aio/Generalized_Few-shot_Learning", "lbl" : "Generalized Few-shot Learning", "type" : "CLASS", "meta" : { "definition" : { "val" : "Methods that can learn novel classes from only few samples per class, preventing catastrophic forgetting of base classes, and classifier calibration across novel and base classes.", "xrefs" : [ "https://paperswithcode.com/paper/generalized-and-incremental-few-shot-learning/review/" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "GFSL" } ] } }, { "id" : "https://w3id.org/aio/Generalized_Linear_Model", "lbl" : "Generalized Linear Model", "type" : "CLASS", "meta" : { "definition" : { "val" : "This model generalizes linear regression by allowing the linear model to be related to the response variable via a link function and by allowing the magnitude of the variance of each measurement to be a function of its predicted value.", "xrefs" : [ "https://en.wikipedia.org/wiki/Generalized_linear_model" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "GLM" } ] } }, { "id" : "https://w3id.org/aio/Generative_Adversarial_Network", "lbl" : "Generative Adversarial Network", "type" : "CLASS", "meta" : { "definition" : { "val" : "A generative adversarial network (GAN) is a class of machine Learning frameworks designed by Ian Goodfellow and his colleagues in 2014. Two neural networks contest with each other in a game (in the form of a zero-sum game, where one agent's gain is another agent's loss). Given a training set, this technique learns to generate new data with the same statistics as the training set. 
For example, a GAN trained on photographs can generate new photographs that look at least superficially authentic to human observers, having many realistic characteristics. Though originally proposed as a form of generative model for unsupervised Learning, GANs have also proven useful for semi-supervised Learning, fully supervised Learning, and reinforcement Learning. The core idea of a GAN is based on the \"indirect\" training through the discriminator, which itself is also being updated dynamically. This basically means that the generator is not trained to minimize the distance to a specific image, but rather to fool the discriminator. This enables the model to learn in an unsupervised manner.", "xrefs" : [ "https://en.wikipedia.org/wiki/Generative_adversarial_network" ] }, "comments" : [ "Backfed Input, Hidden, Matched Output-Input, Hidden, Matched Output-Input" ], "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "GAN" } ] } }, { "id" : "https://w3id.org/aio/GlobalAveragePooling1D_Layer", "lbl" : "GlobalAveragePooling1D Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Global average pooling operation for temporal data.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/GlobalAveragePooling1D" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "GlobalAvgPool1D" }, { "pred" : "hasExactSynonym", "val" : "GlobalAvgPool1d" } ] } }, { "id" : "https://w3id.org/aio/GlobalAveragePooling2D_Layer", "lbl" : "GlobalAveragePooling2D Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Global average pooling operation for spatial data.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/GlobalAveragePooling2D" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "GlobalAvgPool2D" }, { "pred" : "hasExactSynonym", "val" : "GlobalAvgPool2d" } ] } }, { "id" : "https://w3id.org/aio/GlobalAveragePooling3D_Layer", "lbl" : "GlobalAveragePooling3D Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Global Average pooling operation for 3D data.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/GlobalAveragePooling3D" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "GlobalAvgPool3D" }, { "pred" : "hasExactSynonym", "val" : "GlobalAvgPool3d" } ] } }, { "id" : "https://w3id.org/aio/GlobalMaxPooling1D_Layer", "lbl" : "GlobalMaxPooling1D Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Global max pooling operation for 1D temporal data.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/GlobalMaxPool1D" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "GlobalMaxPool1D" }, { "pred" : "hasExactSynonym", "val" : "GlobalMaxPool1d" } ] } }, { "id" : "https://w3id.org/aio/GlobalMaxPooling2D_Layer", "lbl" : "GlobalMaxPooling2D Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Global max pooling operation for spatial data.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/GlobalMaxPool2D" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "GlobalMaxPool2D" }, { "pred" : "hasExactSynonym", "val" : "GlobalMaxPool2d" } ] } }, { "id" : "https://w3id.org/aio/GlobalMaxPooling3D_Layer", "lbl" : "GlobalMaxPooling3D Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Global Max pooling operation for 3D data.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/GlobalMaxPool3D" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "GlobalMaxPool3D" }, { "pred" 
: "hasExactSynonym", "val" : "GlobalMaxPool3d" } ] } }, { "id" : "https://w3id.org/aio/Graph_Convolutional_Network", "lbl" : "Graph Convolutional Network", "type" : "CLASS", "meta" : { "definition" : { "val" : "GCN is a type of convolutional neural network that can work directly on graphs and take advantage of their structural information. (https://arxiv.org/abs/1609.02907)", "xrefs" : [ "https://arxiv.org/abs/1609.02907" ] }, "comments" : [ "Input, Hidden, Hidden, Output" ], "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "GCN" } ] } }, { "id" : "https://w3id.org/aio/Graph_Convolutional_Policy_Network", "lbl" : "Graph Convolutional Policy Network", "type" : "CLASS", "meta" : { "definition" : { "val" : "Graph Convolutional Policy Network (GCPN), a general graph convolutional network based model for goal-directed graph generation through reinforcement Learning. The model is trained to optimize domain-specific rewards and adversarial loss through policy gradient, and acts in an environment that incorporates domain-specific rules.", "xrefs" : [ "https://arxiv.org/abs/1806.02473" ] }, "comments" : [ "Input, Hidden, Hidden, Policy, Output" ], "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "GPCN" } ] } }, { "id" : "https://w3id.org/aio/GroupNorm_Layer", "lbl" : "GroupNorm Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Applies Group Normalization over a mini-batch of inputs as described in the paper Group Normalization", "xrefs" : [ "https://pytorch.org/docs/stable/nn.html#normalization-layers" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "GroupNorm" }, { "pred" : "hasExactSynonym", "val" : "nn.GroupNorm" } ] } }, { "id" : "https://w3id.org/aio/Group_Bias", "lbl" : "Group Bias", "type" : "CLASS", "meta" : { "definition" : { "val" : "A pattern of favoring members of one's in-group over out-group members. This can be expressed in evaluation of others, in allocation of resources, and in many other ways.", "xrefs" : [ "https://en.wikipedia.org/wiki/In-group_favoritism" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "In-group Favoritism" }, { "pred" : "hasExactSynonym", "val" : "In-group bias" }, { "pred" : "hasExactSynonym", "val" : "In-group preference" }, { "pred" : "hasExactSynonym", "val" : "In-group–out-group Bias" }, { "pred" : "hasExactSynonym", "val" : "Intergroup bias" } ] } }, { "id" : "https://w3id.org/aio/Groupthink_Bias", "lbl" : "Groupthink Bias", "type" : "CLASS", "meta" : { "definition" : { "val" : "A psychological phenomenon that occurs when people in a group tend to make non-optimal decisions based on their desire to conform to the group, or fear of dissenting with the group. In groupthink, individuals often refrain from expressing their personal disagreement with the group, hesitating to voice opinions that do not align with the group.", "xrefs" : [ "https://doi.org/10.6028/NIST.SP.1270" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "Groupthink" } ] } }, { "id" : "https://w3id.org/aio/Hard_Sigmoid_Function", "lbl" : "Hard Sigmoid Function", "type" : "CLASS", "meta" : { "definition" : { "val" : "A faster approximation of the sigmoid activation. Piecewise linear approximation of the sigmoid function. 
Ref: 'https://en.wikipedia.org/wiki/Hard_sigmoid'", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/activations/hard_sigmoid" ] } } }, { "id" : "https://w3id.org/aio/Hashing_Layer", "lbl" : "Hashing Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "A preprocessing layer which hashes and bins categorical features. This layer transforms categorical inputs to hashed output. It element-wise converts a ints or strings to ints in a fixed range. The stable hash function uses tensorflow::ops::Fingerprint to produce the same output consistently across all platforms. This layer uses FarmHash64 by default, which provides a consistent hashed output across different platforms and is stable across invocations, regardless of device and context, by mixing the input bits thoroughly. If you want to obfuscate the hashed output, you can also pass a random salt argument in the constructor. In that case, the layer will use the SipHash64 hash function, with the salt value serving as additional input to the hash function.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/Hashing" ] } } }, { "id" : "https://w3id.org/aio/Hidden_Layer", "lbl" : "Hidden Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "A hidden layer is located between the input and output of the algorithm, in which the function applies weights to the inputs and directs them through an activation function as the output. In short, the hidden layers perform nonlinear transformations of the inputs entered into the network. Hidden layers vary depending on the function of the neural network, and similarly, the layers may vary depending on their associated weights.", "xrefs" : [ "https://deepai.org/machine-Learning-glossary-and-terms/hidden-layer-machine-Learning" ] } } }, { "id" : "https://w3id.org/aio/Hierarchical_Classification", "lbl" : "Hierarchical Classification", "type" : "CLASS", "meta" : { "definition" : { "val" : "Methods that group things according to a hierarchy.", "xrefs" : [ "https://en.wikipedia.org/wiki/Hierarchical_classification" ] } } }, { "id" : "https://w3id.org/aio/Hierarchical_Clustering", "lbl" : "Hierarchical Clustering", "type" : "CLASS", "meta" : { "definition" : { "val" : "Methods that seek to build a hierarchy of clusters.", "xrefs" : [ "https://en.wikipedia.org/wiki/Hierarchical_clustering" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "HCL" } ] } }, { "id" : "https://w3id.org/aio/Historical_Bias", "lbl" : "Historical Bias", "type" : "CLASS", "meta" : { "definition" : { "val" : "Referring to the long-standing biases encoded in society over time. Related to, but distinct from, biases in historical description, or the interpretation, analysis, and explanation of history. A common example of historical bias is the tendency to view the larger world from a Western or European view.", "xrefs" : [ "https://doi.org/10.6028/NIST.SP.1270" ] } } }, { "id" : "https://w3id.org/aio/Hopfield_Network", "lbl" : "Hopfield Network", "type" : "CLASS", "meta" : { "definition" : { "val" : "A Hopfield network is a form of recurrent artificial neural network and a type of spin glass system popularised by John Hopfield in 1982 as described earlier by Little in 1974 based on Ernst Ising's work with Wilhelm Lenz on the Ising model. Hopfield networks serve as content-addressable (\"associative\") memory systems with binary threshold nodes, or with continuous variables. Hopfield networks also provide a model for understanding human memory. 
(https://en.wikipedia.org/wiki/Hopfield_network)", "xrefs" : [ "https://en.wikipedia.org/wiki/Hopfield_network" ] }, "comments" : [ "Backfed input" ], "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "HN" }, { "pred" : "hasExactSynonym", "val" : "Ising model of a neural network" }, { "pred" : "hasExactSynonym", "val" : "Ising–Lenz–Little model" } ] } }, { "id" : "https://w3id.org/aio/Hostile_Attribution_Bias", "lbl" : "Hostile Attribution Bias", "type" : "CLASS", "meta" : { "definition" : { "val" : "A bias wherein individuals perceive benign or ambiguous behaviors as hostile.", "xrefs" : [ "https://en.wikipedia.org/wiki/Interpretive_bias" ] } } }, { "id" : "https://w3id.org/aio/Human_Bias", "lbl" : "Human Bias", "type" : "CLASS", "meta" : { "definition" : { "val" : "Systematic errors in human thought based on a limited number of heuristic principles and predicting values to simpler judgmental operations.", "xrefs" : [ "https://doi.org/10.6028/NIST.SP.1270" ] } } }, { "id" : "https://w3id.org/aio/Human_Reporting_Bias", "lbl" : "Human Reporting Bias", "type" : "CLASS", "meta" : { "definition" : { "val" : "When users rely on automation as a heuristic replacement for their own information seeking and processing.", "xrefs" : [ "https://doi.org/10.6028/NIST.SP.1270" ] } } }, { "id" : "https://w3id.org/aio/Image_Augmentation_Layer", "lbl" : "Image Augmentation Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "A layer that performs image data preprocessing augmentations.", "xrefs" : [ "https://keras.io/guides/preprocessing_layers/" ] } } }, { "id" : "https://w3id.org/aio/Image_Preprocessing_Layer", "lbl" : "Image Preprocessing Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "A layer that performs image data preprocessing operations.", "xrefs" : [ "https://keras.io/guides/preprocessing_layers/" ] } } }, { "id" : "https://w3id.org/aio/Implicit_Bias", "lbl" : "Implicit Bias", "type" : "CLASS", "meta" : { "definition" : { "val" : "An unconscious belief, attitude, feeling, association, or stereotype that can affect the way in which humans process information, make decisions, and take actions.", "xrefs" : [ "https://doi.org/10.6028/NIST.SP.1270" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "Confirmatory Bias" } ] } }, { "id" : "https://w3id.org/aio/Incremenetal_Few-shot_Learning", "lbl" : "Incremental Few-shot Learning", "type" : "CLASS", "meta" : { "definition" : { "val" : "Methods that train a network on a base set of classes, which is then presented with several novel classes, each with only a few labeled examples.", "xrefs" : [ "https://arxiv.org/abs/1810.07218" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "IFSL" } ] } }, { "id" : "https://w3id.org/aio/Individual_Bias", "lbl" : "Individual Bias", "type" : "CLASS", "meta" : { "definition" : { "val" : "Individual bias is a persistent point of view or limited list of such points of view that one applies (\"parent\", \"academic\", \"professional\", or etc.).", "xrefs" : [ "https://develop.consumerium.org/wiki/Individual_bias" ] } } }, { "id" : "https://w3id.org/aio/Inherited_Bias", "lbl" : "Inherited Bias", "type" : "CLASS", "meta" : { "definition" : { "val" : "Arises when applications that are built with machine Learning are used to generate inputs for other machine Learning algorithms. 
If the output is biased in any way, this bias may be inherited by systems using the output as input to learn other models.", "xrefs" : [ "https://doi.org/10.6028/NIST.SP.1270" ] } } }, { "id" : "https://w3id.org/aio/InputLayer_Layer", "lbl" : "InputLayer Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Layer to be used as an entry point into a Network (a graph of layers).", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/InputLayer" ] } } }, { "id" : "https://w3id.org/aio/InputSpec_Layer", "lbl" : "InputSpec Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Specifies the rank, dtype and shape of every input to a layer. Layers can expose (if appropriate) an input_spec attribute: an instance of InputSpec, or a nested structure of InputSpec instances (one per input tensor). These objects enable the layer to run input compatibility checks for input structure, input rank, input shape, and input dtype. A None entry in a shape is compatible with any dimension, a None shape is compatible with any shape.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/InputSpec" ] } } }, { "id" : "https://w3id.org/aio/Input_Layer", "lbl" : "Input Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "The input layer of a neural network is composed of artificial input neurons, and brings the initial data into the system for further processing by subsequent layers of artificial neurons. The input layer is the very beginning of the workflow for the artificial neural network.", "xrefs" : [ "https://www.techopedia.com/definition/33262/input-layer-neural-networks#:~:text=Explains%20Input%20Layer-,What%20Does%20Input%20Layer%20Mean%3F,for%20the%20artificial%20neural%20network." ] } } }, { "id" : "https://w3id.org/aio/InstanceNorm1d_Layer", "lbl" : "InstanceNorm1d Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Applies Instance Normalization over a 2D (unbatched) or 3D (batched) input as described in the paper Instance Normalization: The Missing Ingredient for Fast Stylization.", "xrefs" : [ "https://pytorch.org/docs/stable/nn.html#normalization-layers" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "InstanceNorm1D" }, { "pred" : "hasExactSynonym", "val" : "InstanceNorm1d" }, { "pred" : "hasExactSynonym", "val" : "nn.InstanceNorm1d" } ] } }, { "id" : "https://w3id.org/aio/InstanceNorm2d", "lbl" : "InstanceNorm2d", "type" : "CLASS", "meta" : { "definition" : { "val" : "Applies Instance Normalization over a 4D input (a mini-batch of 2D inputs with additional channel dimension) as described in the paper Instance Normalization: The Missing Ingredient for Fast Stylization.", "xrefs" : [ "https://pytorch.org/docs/stable/nn.html#normalization-layers" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "InstanceNorm2D" }, { "pred" : "hasExactSynonym", "val" : "InstanceNorm2d" }, { "pred" : "hasExactSynonym", "val" : "nn.InstanceNorm2d" } ] } }, { "id" : "https://w3id.org/aio/InstanceNorm3d_Layer", "lbl" : "InstanceNorm3d Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Applies Instance Normalization over a 5D input (a mini-batch of 3D inputs with additional channel dimension) as described in the paper Instance Normalization: The Missing Ingredient for Fast Stylization.", "xrefs" : [ "https://pytorch.org/docs/stable/nn.html#normalization-layers" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "InstanceNorm3D" }, { "pred" : "hasExactSynonym", "val" : "InstanceNorm3d" }, { "pred" : 
"hasExactSynonym", "val" : "nn.InstanceNorm3d" } ] } }, { "id" : "https://w3id.org/aio/Institutional_Bias", "lbl" : "Institutional Bias", "type" : "CLASS", "meta" : { "definition" : { "val" : "In contrast to biases exhibited at the level of individual persons, institutional bias refers to a tendency exhibited at the level of entire institutions, where practices or norms result in the favoring or disadvantaging of certain social groups. Common examples include institutional racism and institutional sexism.", "xrefs" : [ "https://doi.org/10.6028/NIST.SP.1270" ] } } }, { "id" : "https://w3id.org/aio/IntegerLookup_Layer", "lbl" : "IntegerLookup Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "A preprocessing layer which maps integer features to contiguous ranges.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/IntegerLookup" ] } } }, { "id" : "https://w3id.org/aio/Interpretation_Bias", "lbl" : "Interpretation Bias", "type" : "CLASS", "meta" : { "definition" : { "val" : "A form of information processing bias that can occur when users interpret algorithmic outputs according to their internalized biases and views.", "xrefs" : [ "https://doi.org/10.6028/NIST.SP.1270" ] } } }, { "id" : "https://w3id.org/aio/K-nearest_Neighbor_Algorithm", "lbl" : "K-nearest Neighbor Algorithm", "type" : "CLASS", "meta" : { "definition" : { "val" : "An algorithm to group objects by a plurality vote of its neighbors, with the object being assigned to the class most common among its k nearest neighbors", "xrefs" : [ "https://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "K-NN" }, { "pred" : "hasExactSynonym", "val" : "KNN" } ] } }, { "id" : "https://w3id.org/aio/K-nearest_Neighbor_Classification_Algorithm", "lbl" : "K-nearest Neighbor Classification Algorithm", "type" : "CLASS", "meta" : { "definition" : { "val" : "An algorithm to classify objects by a plurality vote of its neighbors, with the object being assigned to the class most common among its k nearest neighbors", "xrefs" : [ "https://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "K-NN" }, { "pred" : "hasExactSynonym", "val" : "KNN" } ] } }, { "id" : "https://w3id.org/aio/K-nearest_Neighbor_Regression_Algorithm", "lbl" : "K-nearest Neighbor Regression Algorithm", "type" : "CLASS", "meta" : { "definition" : { "val" : "An algorithm to assign the average of the values of k nearest neighbors to objects.", "xrefs" : [ "https://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "K-NN" }, { "pred" : "hasExactSynonym", "val" : "KNN" } ] } }, { "id" : "https://w3id.org/aio/Kohonen_Network", "lbl" : "Kohonen Network", "type" : "CLASS", "meta" : { "definition" : { "val" : "A self-organizing map (SOM) or self-organizing feature map (SOFM) is an unsupervised machine Learning technique used to produce a low-dimensional (typically two-dimensional) representation of a higher dimensional data set while preserving the topological structure of the data. For example, a data set with p variables measured in n observations could be represented as clusters of observations with similar values for the variables. These clusters then could be visualized as a two-dimensional \"map\" such that observations in proximal clusters have more similar values than observations in distal clusters. This can make high-dimensional data easier to visualize and analyze. 
An SOM is a type of artificial neural network but is trained using competitive Learning rather than the error-correction Learning (e.g., backpropagation with gradient descent) used by other artificial neural networks. The SOM was introduced by the Finnish professor Teuvo Kohonen in the 1980s and therefore is sometimes called a Kohonen map or Kohonen network.[1][2] The Kohonen map or network is a computationally convenient abstraction building on biological models of neural systems from the 1970s[3] and morphogenesis models dating back to Alan Turing in the 1950s.", "xrefs" : [ "https://en.wikipedia.org/wiki/Self-organizing_map" ] }, "comments" : [ "Input, Hidden" ], "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "KN" }, { "pred" : "hasExactSynonym", "val" : "SOFM" }, { "pred" : "hasExactSynonym", "val" : "SOM" }, { "pred" : "hasExactSynonym", "val" : "Self-Organizing Feature Map" }, { "pred" : "hasExactSynonym", "val" : "Self-Organizing Map" } ] } }, { "id" : "https://w3id.org/aio/LPPool1D_Layer", "lbl" : "LPPool1D Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Applies a 1D power-average pooling over an input signal composed of several input planes.", "xrefs" : [ "https://pytorch.org/docs/stable/nn.html#pooling-layers" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "LPPool1D" }, { "pred" : "hasExactSynonym", "val" : "LPPool1d" } ] } }, { "id" : "https://w3id.org/aio/LPPool2D_Layer", "lbl" : "LPPool2D Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Applies a 2D power-average pooling over an input signal composed of several input planes.", "xrefs" : [ "https://pytorch.org/docs/stable/nn.html#pooling-layers" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "LPPool2D" }, { "pred" : "hasExactSynonym", "val" : "LPPool2d" } ] } }, { "id" : "https://w3id.org/aio/LSTMCell_Layer", "lbl" : "LSTMCell Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Cell class for the LSTM layer.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/LSTMCell" ] } } }, { "id" : "https://w3id.org/aio/LSTM_Layer", "lbl" : "LSTM Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Long Short-Term Memory layer - Hochreiter 1997. Based on available runtime hardware and constraints, this layer will choose different implementations (cuDNN-based or pure-TensorFlow) to maximize the performance. If a GPU is available and all the arguments to the layer meet the requirement of the cuDNN kernel (see below for details), the layer will use a fast cuDNN implementation. The requirements to use the cuDNN implementation are: 1. activation == tanh, 2. recurrent_activation == sigmoid, 3. recurrent_dropout == 0, 4. unroll is False, 5. use_bias is True, 6. Inputs, if use masking, are strictly right-padded, 7. 
Eager execution is enabled in the outermost context.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/LSTM" ] } } }, { "id" : "https://w3id.org/aio/Lambda_Layer", "lbl" : "Lambda Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Wraps arbitrary expressions as a Layer object.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/Lambda" ] } } }, { "id" : "https://w3id.org/aio/Large_Language_Model", "lbl" : "Large Language Model", "type" : "CLASS", "meta" : { "definition" : { "val" : "A large language model (LLM) is a language model consisting of a neural network with many parameters (typically billions of weights or more), trained on large quantities of unlabeled text using self-supervised learning or semi-supervised learning.", "xrefs" : [ "https://en.wikipedia.org/wiki/Large_language_model" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "LLM" } ] } }, { "id" : "https://w3id.org/aio/Lasso_Regression", "lbl" : "Lasso Regression", "type" : "CLASS", "meta" : { "definition" : { "val" : "A regression analysis method that performs both variable selection and regularizationin order to enhance the prediction accuracy and interpretability of the resulting statistical model.", "xrefs" : [ "https://en.wikipedia.org/wiki/Lasso_(statistics)" ] } } }, { "id" : "https://w3id.org/aio/Layer", "lbl" : "Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Network layer parent class" } } }, { "id" : "https://w3id.org/aio/LayerNorm_Layer", "lbl" : "LayerNorm Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Applies Layer Normalization over a mini-batch of inputs as described in the paper Layer Normalization", "xrefs" : [ "https://pytorch.org/docs/stable/nn.html#normalization-layers" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "LayerNorm" }, { "pred" : "hasExactSynonym", "val" : "nn.LayerNorm" } ] } }, { "id" : "https://w3id.org/aio/LayerNormalization_Layer", "lbl" : "LayerNormalization Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Layer normalization layer (Ba et al., 2016). Normalize the activations of the previous layer for each given example in a batch independently, rather than across a batch like Batch Normalization. i.e. applies a transformation that maintains the mean activation within each example close to 0 and the activation standard deviation close to 1. Given a tensor inputs, moments are calculated and normalization is performed across the axes specified in axis.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/LayerNormalization" ] } } }, { "id" : "https://w3id.org/aio/Layer_Layer", "lbl" : "Layer Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "This is the class from which all layers inherit. A layer is a callable object that takes as input one or more tensors and that outputs one or more tensors. It involves computation, defined in the call() method, and a state (weight variables). State can be created in various places, at the convenience of the subclass implementer: in __init__(); in the optional build() method, which is invoked by the first __call__() to the layer, and supplies the shape(s) of the input(s), which may not have been known at initialization time; in the first invocation of call(), with some caveats discussed below. 
Users will just instantiate a layer and then treat it as a callable.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer" ] } } }, { "id" : "https://w3id.org/aio/LazyBatchNorm1D_Layer", "lbl" : "LazyBatchNorm1D Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "A torch.nn.BatchNorm1d module with lazy initialization of the num_features argument of the BatchNorm1d that is inferred from the input.size(1).", "xrefs" : [ "https://pytorch.org/docs/stable/nn.html#normalization-layers" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "LazyBatchNorm1D" }, { "pred" : "hasExactSynonym", "val" : "LazyBatchNorm1d" }, { "pred" : "hasExactSynonym", "val" : "nn.LazyBatchNorm1d" } ] } }, { "id" : "https://w3id.org/aio/LazyBatchNorm2D_Layer", "lbl" : "LazyBatchNorm2D Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "A torch.nn.BatchNorm2d module with lazy initialization of the num_features argument of the BatchNorm2d that is inferred from the input.size(1).", "xrefs" : [ "https://pytorch.org/docs/stable/nn.html#normalization-layers" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "LazyBatchNorm2D" }, { "pred" : "hasExactSynonym", "val" : "LazyBatchNorm2d" }, { "pred" : "hasExactSynonym", "val" : "nn.LazyBatchNorm2d" } ] } }, { "id" : "https://w3id.org/aio/LazyBatchNorm3D_Layer", "lbl" : "LazyBatchNorm3D Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "A torch.nn.BatchNorm3d module with lazy initialization of the num_features argument of the BatchNorm3d that is inferred from the input.size(1).", "xrefs" : [ "https://pytorch.org/docs/stable/nn.html#normalization-layers" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "LazyBatchNorm3D" }, { "pred" : "hasExactSynonym", "val" : "LazyBatchNorm3d" }, { "pred" : "hasExactSynonym", "val" : "nn.LazyBatchNorm3d" } ] } }, { "id" : "https://w3id.org/aio/LazyInstanceNorm1d_Layer", "lbl" : "LazyInstanceNorm1d Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "A torch.nn.InstanceNorm1d module with lazy initialization of the num_features argument of the InstanceNorm1d that is inferred from the input.size(1).", "xrefs" : [ "https://pytorch.org/docs/stable/nn.html#normalization-layers" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "LazyInstanceNorm1D" }, { "pred" : "hasExactSynonym", "val" : "LazyInstanceNorm1d" }, { "pred" : "hasExactSynonym", "val" : "nn.LazyInstanceNorm1d" } ] } }, { "id" : "https://w3id.org/aio/LazyInstanceNorm2d_Layer", "lbl" : "LazyInstanceNorm2d Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "A torch.nn.InstanceNorm2d module with lazy initialization of the num_features argument of the InstanceNorm2d that is inferred from the input.size(1).", "xrefs" : [ "https://pytorch.org/docs/stable/nn.html#normalization-layers" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "LazyInstanceNorm2D" }, { "pred" : "hasExactSynonym", "val" : "LazyInstanceNorm2d" }, { "pred" : "hasExactSynonym", "val" : "nn.LazyInstanceNorm2d" } ] } }, { "id" : "https://w3id.org/aio/LazyInstanceNorm3d_Layer", "lbl" : "LazyInstanceNorm3d Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "A torch.nn.InstanceNorm3d module with lazy initialization of the num_features argument of the InstanceNorm3d that is inferred from the input.size(1).", "xrefs" : [ "https://pytorch.org/docs/stable/nn.html#normalization-layers" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "LazyInstanceNorm3D" }, { "pred" : "hasExactSynonym", "val" : 
"LazyInstanceNorm3d" }, { "pred" : "hasExactSynonym", "val" : "nn.LazyInstanceNorm3d" } ] } }, { "id" : "https://w3id.org/aio/LeakyReLU_Layer", "lbl" : "LeakyReLU Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Leaky version of a Rectified Linear Unit.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/LeakyReLU" ] } } }, { "id" : "https://w3id.org/aio/Least-squares_Analysis", "lbl" : "Least-squares Analysis", "type" : "CLASS", "meta" : { "definition" : { "val" : "A standard approach in regression analysis to approximate the solution of overdetermined systems(sets of equations in which there are more equations than unknowns) by minimizing the sum of the squares of the residuals (a residual being the difference between an observed value and the fitted value provided by a model) made in the results of each individual equation.", "xrefs" : [ "https://en.wikipedia.org/wiki/Least_squares" ] } } }, { "id" : "https://w3id.org/aio/Linear_Function", "lbl" : "Linear Function", "type" : "CLASS", "meta" : { "definition" : { "val" : "A linear function has the form f(x) = a + bx.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/activations/linear" ] } } }, { "id" : "https://w3id.org/aio/Linear_Regression", "lbl" : "Linear Regression", "type" : "CLASS", "meta" : { "definition" : { "val" : "A linear approach for modelling the relationship between a scalar response and one or more explanatory variables (also known as dependent and independent variables).", "xrefs" : [ "https://en.wikipedia.org/wiki/Linear_regression" ] } } }, { "id" : "https://w3id.org/aio/Linking_Bias", "lbl" : "Linking Bias", "type" : "CLASS", "meta" : { "definition" : { "val" : "Arises when network attributes obtained from user connections, activities, or interactions differ and misrepresent the true behavior of the users.", "xrefs" : [ "https://doi.org/10.6028/NIST.SP.1270" ] } } }, { "id" : "https://w3id.org/aio/Liquid_State_Machine_Network", "lbl" : "Liquid State Machine Network", "type" : "CLASS", "meta" : { "definition" : { "val" : "A liquid state machine (LSM) is a type of reservoir computer that uses a spiking neural network. An LSM consists of a large collection of units (called nodes, or neurons). Each node receives time varying input from external sources (the inputs) as well as from other nodes. Nodes are randomly connected to each other. The recurrent nature of the connections turns the time varying input into a spatio-temporal pattern of activations in the network nodes. The spatio-temporal patterns of activation are read out by linear discriminant units. The soup of recurrently connected nodes will end up computing a large variety of nonlinear functions on the input. Given a large enough variety of such nonlinear functions, it is theoretically possible to obtain linear combinations (using the read out units) to perform whatever mathematical operation is needed to perform a certain task, such as speech recognition or computer vision. The word liquid in the name comes from the analogy drawn to dropping a stone into a still body of water or other liquid. The falling stone will generate ripples in the liquid. The input (motion of the falling stone) has been converted into a spatio-temporal pattern of liquid displacement (ripples). 
(https://en.wikipedia.org/wiki/Liquid_state_machine)", "xrefs" : [ "https://en.wikipedia.org/wiki/Liquid_state_machine" ] }, "comments" : [ "Input, Spiking Hidden, Output" ], "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "LSM" } ] } }, { "id" : "https://w3id.org/aio/LocalResponseNorm_Layer", "lbl" : "LocalResponseNorm Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Applies local response normalization over an input signal composed of several input planes, where channels occupy the second dimension.", "xrefs" : [ "https://pytorch.org/docs/stable/nn.html#normalization-layers" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "LocalResponseNorm" }, { "pred" : "hasExactSynonym", "val" : "nn.LocalResponseNorm" } ] } }, { "id" : "https://w3id.org/aio/Locally-connected_Layer", "lbl" : "Locally-connected Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "The LocallyConnected1D layer works similarly to the Convolution1D layer, except that weights are unshared, that is, a different set of filters is applied at each different patch of the input.", "xrefs" : [ "https://faroit.com/keras-docs/1.2.2/layers/local/" ] } } }, { "id" : "https://w3id.org/aio/LocallyConnected1D_Layer", "lbl" : "LocallyConnected1D Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Locally-connected layer for 1D inputs. The LocallyConnected1D layer works similarly to the Conv1D layer, except that weights are unshared, that is, a different set of filters is applied at each different patch of the input.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/LocallyConnected1D" ] } } }, { "id" : "https://w3id.org/aio/LocallyConnected2D_Layer", "lbl" : "LocallyConnected2D Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Locally-connected layer for 2D inputs. The LocallyConnected2D layer works similarly to the Conv2D layer, except that weights are unshared, that is, a different set of filters is applied at each different patch of the input.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/LocallyConnected2D" ] } } }, { "id" : "https://w3id.org/aio/Logistic_Regression", "lbl" : "Logistic Regression", "type" : "CLASS", "meta" : { "definition" : { "val" : "A statistical model that models the probability of an event taking place by having the log-odds for the event be a linear combination of one or more independent variables.", "xrefs" : [ "https://en.wikipedia.org/wiki/Logistic_regression" ] } } }, { "id" : "https://w3id.org/aio/Long_Short_Term_Memory", "lbl" : "Long Short Term Memory", "type" : "CLASS", "meta" : { "definition" : { "val" : "Long short-term memory (LSTM) is an artificial recurrent neural network (RNN) architecture used in the field of deep Learning. Unlike standard feedforward neural networks, LSTM has feedback connections. It can process not only single data points (such as images), but also entire sequences of data (such as speech or video). For example, LSTM is applicable to tasks such as unsegmented, connected handwriting recognition, speech recognition and anomaly detection in network traffic or IDSs (intrusion detection systems). A common LSTM unit is composed of a cell, an input gate, an output gate and a forget gate. 
The cell remembers values over arbitrary time intervals and the three gates regulate the flow of information into and out of the cell.", "xrefs" : [ "https://en.wikipedia.org/wiki/Long_short-term_memory" ] }, "comments" : [ "Input, Memory Cell, Output" ], "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "LSTM" } ] } }, { "id" : "https://w3id.org/aio/Loss_Of_Situational_Awareness_Bias", "lbl" : "Loss Of Situational Awareness Bias", "type" : "CLASS", "meta" : { "definition" : { "val" : "When automation leads to humans being unaware of their situation such that, when control of a system is given back to them in a situation where humans and machines cooperate, they are unprepared to assume their duties. This can be a loss of awareness over what automation is and isn’t taking care of.", "xrefs" : [ "https://doi.org/10.6028/NIST.SP.1270" ] } } }, { "id" : "https://w3id.org/aio/Machine_Learning", "lbl" : "Machine Learning", "type" : "CLASS", "meta" : { "definition" : { "val" : "A field of inquiry devoted to understanding and building methods that 'learn', that is, methods that leverage data to improve performance on some set of tasks.", "xrefs" : [ "https://en.wikipedia.org/wiki/Machine_learning" ] } } }, { "id" : "https://w3id.org/aio/Manifold_Learning", "lbl" : "Manifold Learning", "type" : "CLASS", "meta" : { "definition" : { "val" : "Methods based on the assumption that one's observed data lie on a low-dimensional manifold embedded in a higher-dimensional space.", "xrefs" : [ "https://arxiv.org/abs/2011.01307" ] } } }, { "id" : "https://w3id.org/aio/Markov_Chain", "lbl" : "Markov Chain", "type" : "CLASS", "meta" : { "definition" : { "val" : "A Markov chain or Markov process is a stochastic model describing a sequence of possible events in which the probability of each event depends only on the state attained in the previous event.[1][2][3] A countably infinite sequence, in which the chain moves state at discrete time steps, gives a discrete-time Markov chain (DTMC). A continuous-time process is called a continuous-time Markov chain (CTMC). It is named after the Russian mathematician Andrey Markov.", "xrefs" : [ "https://en.wikipedia.org/wiki/Markov_chain" ] }, "comments" : [ "Probalistic Hidden" ], "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "MC" }, { "pred" : "hasExactSynonym", "val" : "MP" }, { "pred" : "hasExactSynonym", "val" : "Markov Process" } ] } }, { "id" : "https://w3id.org/aio/Masking_Layer", "lbl" : "Masking Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Masks a sequence by using a mask value to skip timesteps. For each timestep in the input tensor (dimension #1 in the tensor), if all values in the input tensor at that timestep are equal to mask_value, then the timestep will be masked (skipped) in all downstream layers (as long as they support masking). If any downstream layer does not support masking yet receives such an input mask, an exception will be raised.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/Masking" ] } } }, { "id" : "https://w3id.org/aio/MaxPooling1D_Layer", "lbl" : "MaxPooling1D Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Max pooling operation for 1D temporal data. Downsamples the input representation by taking the maximum value over a spatial window of size pool_size. The window is shifted by strides. 
The resulting output, when using the \"valid\" padding option, has a shape of: output_shape = (input_shape - pool_size + 1) / strides) The resulting output shape when using the \"same\" padding option is: output_shape = input_shape / strides.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/MaxPool1D" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "MaxPool1D" }, { "pred" : "hasExactSynonym", "val" : "MaxPool1d" }, { "pred" : "hasExactSynonym", "val" : "MaxPooling1D" }, { "pred" : "hasExactSynonym", "val" : "MaxPooling1d" } ] } }, { "id" : "https://w3id.org/aio/MaxPooling2D_Layer", "lbl" : "MaxPooling2D Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Max pooling operation for 2D spatial data.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/MaxPool2D" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "MaxPool2D" }, { "pred" : "hasExactSynonym", "val" : "MaxPool2d" }, { "pred" : "hasExactSynonym", "val" : "MaxPooling2D" }, { "pred" : "hasExactSynonym", "val" : "MaxPooling2d" } ] } }, { "id" : "https://w3id.org/aio/MaxPooling3D_Layer", "lbl" : "MaxPooling3D Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Max pooling operation for 3D data (spatial or spatio-temporal). Downsamples the input along its spatial dimensions (depth, height, and width) by taking the maximum value over an input window (of size defined by pool_size) for each channel of the input. The window is shifted by strides along each dimension.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/MaxPool3D" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "MaxPool3D" }, { "pred" : "hasExactSynonym", "val" : "MaxPool3d" }, { "pred" : "hasExactSynonym", "val" : "MaxPooling3D" }, { "pred" : "hasExactSynonym", "val" : "MaxPooling3d" } ] } }, { "id" : "https://w3id.org/aio/MaxUnpool1D_Layer", "lbl" : "MaxUnpool1D Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Computes a partial inverse of MaxPool1d.", "xrefs" : [ "https://pytorch.org/docs/stable/nn.html#pooling-layers" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "MaxUnpool1D" }, { "pred" : "hasExactSynonym", "val" : "MaxUnpool1d" } ] } }, { "id" : "https://w3id.org/aio/MaxUnpool2D_Layer", "lbl" : "MaxUnpool2D Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Computes a partial inverse of MaxPool2d.", "xrefs" : [ "https://pytorch.org/docs/stable/nn.html#pooling-layers" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "MaxUnpool2D" }, { "pred" : "hasExactSynonym", "val" : "MaxUnpool2d" } ] } }, { "id" : "https://w3id.org/aio/MaxUnpool3D_Layer", "lbl" : "MaxUnpool3D Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Computes a partial inverse of MaxPool3d.", "xrefs" : [ "https://pytorch.org/docs/stable/nn.html#pooling-layers" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "MaxUnpool3D" }, { "pred" : "hasExactSynonym", "val" : "MaxUnpool3d" } ] } }, { "id" : "https://w3id.org/aio/Maximum_Layer", "lbl" : "Maximum Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Layer that computes the maximum (element-wise) a list of inputs. 
It takes as input a list of tensors, all of the same shape, and returns a single tensor (also of the same shape).", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/Maximum" ] } } }, { "id" : "https://w3id.org/aio/Measurement_Bias", "lbl" : "Measurement Bias", "type" : "CLASS", "meta" : { "definition" : { "val" : "Arises when features and labels are proxies for desired quantities, potentially leaving out important factors or introducing group or input-dependent noise that leads to differential performance.", "xrefs" : [ "https://doi.org/10.6028/NIST.SP.1270" ] } } }, { "id" : "https://w3id.org/aio/Merging_Layer", "lbl" : "Merging Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "A layer used to merge a list of inputs.", "xrefs" : [ "https://www.tutorialspoint.com/keras/keras_merge_layer.htm" ] } } }, { "id" : "https://w3id.org/aio/Meta-Learning", "lbl" : "Meta-Learning", "type" : "CLASS", "meta" : { "definition" : { "val" : "Automatic learning algorithms applied to metadata about machine Learning experiments.", "xrefs" : [ "https://en.wikipedia.org/wiki/Meta_learning_(computer_science)" ] } } }, { "id" : "https://w3id.org/aio/Method", "lbl" : "Method", "type" : "CLASS", "meta" : { "definition" : { "val" : "Method parent class." } } }, { "id" : "https://w3id.org/aio/Metric_Learning", "lbl" : "Metric Learning", "type" : "CLASS", "meta" : { "definition" : { "val" : "Methods which can learn a representation function that maps objects into an embedded space.", "xrefs" : [ "https://paperswithcode.com/task/metric-learning" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "Distance Metric Learning" } ] } }, { "id" : "https://w3id.org/aio/Minimum_Layer", "lbl" : "Minimum Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Layer that computes the minimum (element-wise) a list of inputs. It takes as input a list of tensors, all of the same shape, and returns a single tensor (also of the same shape).", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/Minimum" ] } } }, { "id" : "https://w3id.org/aio/Mode_Confusion_Bias", "lbl" : "Mode Confusion Bias", "type" : "CLASS", "meta" : { "definition" : { "val" : "When modal interfaces confuse human operators, who misunderstand which mode the system is using, taking actions which are correct for a different mode but incorrect for their current situation. This is the cause of many deadly accidents, but also a source of confusion in everyday life.", "xrefs" : [ "https://doi.org/10.6028/NIST.SP.1270" ] } } }, { "id" : "https://w3id.org/aio/Model_Selection_Bias", "lbl" : "Model Selection Bias", "type" : "CLASS", "meta" : { "definition" : { "val" : "The bias introduced while using the data to select a single seemingly “best” model from a large set of models employing many predictor variables. Model selection bias also occurs when an explanatory variable has a weak relationship with the response variable.", "xrefs" : [ "https://doi.org/10.6028/NIST.SP.1270" ] } } }, { "id" : "https://w3id.org/aio/MultiHeadAttention_Layer", "lbl" : "MultiHeadAttention Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "MultiHeadAttention layer. This is an implementation of multi-headed attention as described in the paper \"Attention is all you Need\" (Vaswani et al., 2017). If query, key, value are the same, then this is self-attention. Each timestep in query attends to the corresponding sequence in key, and returns a fixed-width vector.This layer first projects query, key and value. 
These are (effectively) a list of tensors of length num_attention_heads, where the corresponding shapes are (batch_size, <query dimensions>, key_dim), (batch_size, <key/value dimensions>, key_dim), (batch_size, <key/value dimensions>, value_dim). Then, the query and key tensors are dot-producted and scaled. These are softmaxed to obtain attention probabilities. The value tensors are then interpolated by these probabilities, then concatenated back to a single tensor. Finally, the result tensor with the last dimension as value_dim can take a linear projection and return. When using MultiHeadAttention inside a custom Layer, the custom Layer must implement build() and call MultiHeadAttention's _build_from_signature(). This enables weights to be restored correctly when the model is loaded.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/MultiHeadAttention" ] } } }, { "id" : "https://w3id.org/aio/Multiclass_Classification", "lbl" : "Multiclass Classification", "type" : "CLASS", "meta" : { "definition" : { "val" : "Methods that classify instances into one of three or more classes (classifying instances into one of two classes is called binary classification).", "xrefs" : [ "https://en.wikipedia.org/wiki/Multiclass_classification" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "Multinomial Classification" } ] } }, { "id" : "https://w3id.org/aio/Multidimensional_Scaling", "lbl" : "Multidimensional Scaling", "type" : "CLASS", "meta" : { "definition" : { "val" : "A method that translates information about the pairwise distances among a set of objects or individuals into a configuration of points mapped into an abstract Cartesian space.", "xrefs" : [ "https://en.wikipedia.org/wiki/Multidimensional_scaling" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "MDS" } ] } }, { "id" : "https://w3id.org/aio/Multimodal_Deep_Learning", "lbl" : "Multimodal Deep Learning", "type" : "CLASS", "meta" : { "definition" : { "val" : "Methods which can create models that can process and link information using various modalities.", "xrefs" : [ "https://arxiv.org/abs/2105.11087" ] } } }, { "id" : "https://w3id.org/aio/Multimodal_Learning", "lbl" : "Multimodal Learning", "type" : "CLASS", "meta" : { "definition" : { "val" : "Methods which can represent the joint representations of different modalities." } } }, { "id" : "https://w3id.org/aio/Multiply_Layer", "lbl" : "Multiply Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Layer that multiplies (element-wise) a list of inputs. 
It takes as input a list of tensors, all of the same shape, and returns a single tensor (also of the same shape).", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/Multiply" ] } } }, { "id" : "https://w3id.org/aio/Natural_Language_Processing", "lbl" : "Natural Language Processing", "type" : "CLASS", "meta" : { "definition" : { "val" : "A subfield of linguistics, computer science, and artificial intelligence concerned with the interactions between computers and human language, in particular how to program computers to process and analyze large amounts of natural language data.", "xrefs" : [ "https://en.wikipedia.org/wiki/Natural_language_processing" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "NLP" } ] } }, { "id" : "https://w3id.org/aio/Network", "lbl" : "Network", "type" : "CLASS", "meta" : { "definition" : { "val" : "Network parent class" } } }, { "id" : "https://w3id.org/aio/Neural_Turing_Machine_Network", "lbl" : "Neural Turing Machine Network", "type" : "CLASS", "meta" : { "definition" : { "val" : "A Neural Turing machine (NTMs) is a recurrent neural network model. The approach was published by Alex Graves et al. in 2014. NTMs combine the fuzzy pattern matching capabilities of neural networks with the algorithmic power of programmable computers. An NTM has a neural network controller coupled to external memory resources, which it interacts with through attentional mechanisms. The memory interactions are differentiable end-to-end, making it possible to optimize them using gradient descent. An NTM with a long short-term memory (LSTM) network controller can infer simple algorithms such as copying, sorting, and associative recall from examples alone.", "xrefs" : [ "https://en.wikipedia.org/wiki/Neural_Turing_machine" ] }, "comments" : [ "Input, Hidden, Spiking Hidden, Output" ], "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "NTM" } ] } }, { "id" : "https://w3id.org/aio/Noise_Dense_Layer", "lbl" : "Noise Dense Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Noisy dense layer that injects random noise to the weights of dense layer. Noisy dense layers are fully connected layers whose weights and biases are augmented by factorised Gaussian noise. The factorised Gaussian noise is controlled through gradient descent by a second weights layer. 
A NoisyDense layer implements the operation: $$ \\mathrm{NoisyDense}(x) = \\mathrm{activation}(\\mathrm{dot}(x, \\mu + (\\sigma \\cdot \\epsilon)) + \\mathrm{bias}) $$ where mu is the standard weights layer, epsilon is the factorised Gaussian noise, and sigma is a second weights layer which controls epsilon.", "xrefs" : [ "https://www.tensorflow.org/addons/api_docs/python/tfa/layers/NoisyDense" ] } } }, { "id" : "https://w3id.org/aio/Normalization_Layer", "lbl" : "Normalization Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "A preprocessing layer which normalizes continuous features.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/Normalization" ] } } }, { "id" : "https://w3id.org/aio/Numerical_Features_Preprocessing_Layer", "lbl" : "Numerical Features Preprocessing Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "A layer that performs numerical data preprocessing operations.", "xrefs" : [ "https://keras.io/guides/preprocessing_layers/" ] } } }, { "id" : "https://w3id.org/aio/One-shot_Learning", "lbl" : "One-shot Learning", "type" : "CLASS", "meta" : { "definition" : { "val" : "A method which aims to classify objects from one, or only a few, examples.", "xrefs" : [ "https://en.wikipedia.org/wiki/One-shot_learning" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "OSL" } ] } }, { "id" : "https://w3id.org/aio/Output_Layer", "lbl" : "Output Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "The output layer in an artificial neural network is the last layer of neurons that produces given outputs for the program. Though they are made much like other artificial neurons in the neural network, output layer neurons may be built or observed in a different way, given that they are the last “actor” nodes on the network.", "xrefs" : [ "https://www.techopedia.com/definition/33263/output-layer-neural-networks" ] } } }, { "id" : "https://w3id.org/aio/PReLU_Layer", "lbl" : "PReLU Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Parametric Rectified Linear Unit.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/PReLU" ] } } }, { "id" : "https://w3id.org/aio/Perceptron", "lbl" : "Perceptron", "type" : "CLASS", "meta" : { "definition" : { "val" : "The perceptron is an algorithm for supervised learning of binary classifiers. A binary classifier is a function which can decide whether or not an input, represented by a vector of numbers, belongs to some specific class. It is a type of linear classifier, i.e. a classification algorithm that makes its predictions based on a linear predictor function combining a set of weights with the feature vector. (https://en.wikipedia.org/wiki/Perceptron)" }, "comments" : [ "Input, Output" ], "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "SLP" }, { "pred" : "hasExactSynonym", "val" : "Single Layer Perceptron" } ] } }, { "id" : "https://w3id.org/aio/Permute_Layer", "lbl" : "Permute Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Permutes the dimensions of the input according to a given pattern. Useful e.g.
for connecting RNNs and convnets.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/Permute" ] } } }, { "id" : "https://w3id.org/aio/Pooling_Layer", "lbl" : "Pooling Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Pooling layers serve the dual purposes of mitigating the sensitivity of convolutional layers to location and of spatially downsampling representations.", "xrefs" : [ "https://d2l.ai/chapter_convolutional-neural-networks/pooling.html" ] } } }, { "id" : "https://w3id.org/aio/Popularity_Bias", "lbl" : "Popularity Bias", "type" : "CLASS", "meta" : { "definition" : { "val" : "A form of selection bias that occurs when items that are more popular are more exposed and less popular items are under-represented.", "xrefs" : [ "https://doi.org/10.6028/NIST.SP.1270" ] } } }, { "id" : "https://w3id.org/aio/Population_Bias", "lbl" : "Population Bias", "type" : "CLASS", "meta" : { "definition" : { "val" : "Systematic distortions in demographics or other user characteristics between a population of users represented in a dataset or on a platform and some target population.", "xrefs" : [ "https://doi.org/10.6028/NIST.SP.1270" ] } } }, { "id" : "https://w3id.org/aio/Preprocessing_Layer", "lbl" : "Preprocessing Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "A layer that performs data preprocessing operations.", "xrefs" : [ "https://www.tensorflow.org/guide/keras/preprocessing_layers" ] } } }, { "id" : "https://w3id.org/aio/Presentation_Bias", "lbl" : "Presentation Bias", "type" : "CLASS", "meta" : { "definition" : { "val" : "Biases arising from how information is presented on the Web, via a user interface, due to rating or ranking of output, or through users’ own self-selected, biased interaction.", "xrefs" : [ "https://doi.org/10.6028/NIST.SP.1270" ] } } }, { "id" : "https://w3id.org/aio/Principal_Component_Analysis", "lbl" : "Principal Component Analysis", "type" : "CLASS", "meta" : { "definition" : { "val" : "A method for analyzing large datasets containing a high number of dimensions/features per observation, increasing the interpretability of data while preserving the maximum amount of information, and enabling the visualization of multidimensional data.", "xrefs" : [ "https://en.wikipedia.org/wiki/Principal_component_analysis" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "PCA" } ] } }, { "id" : "https://w3id.org/aio/Probabilistic_Graphical_Model", "lbl" : "Probabilistic Graphical Model", "type" : "CLASS", "meta" : { "definition" : { "val" : "A probabilistic model for which a graph expresses the conditional dependence structure between random variables.", "xrefs" : [ "https://en.wikipedia.org/wiki/Graphical_model" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "Graphical Model" }, { "pred" : "hasExactSynonym", "val" : "PGM" }, { "pred" : "hasExactSynonym", "val" : "Structured Probabilistic Model" } ] } }, { "id" : "https://w3id.org/aio/Probabilistic_Topic_Model", "lbl" : "Probabilistic Topic Model", "type" : "CLASS", "meta" : { "definition" : { "val" : "Methods that use statistical methods to analyze the words in each text to discover common themes, how those themes are connected to each other, and how they change over time.", "xrefs" : [ "https://pyro.ai/examples/prodlda.html" ] } } }, { "id" : "https://w3id.org/aio/Processing_Bias", "lbl" : "Processing Bias", "type" : "CLASS", "meta" : {
"definition" : { "val" : "Judgement modulated by affect, which is influenced by the level of efficacy and efficiency in information processing; in cognitive sciences, processing bias is often referred to as an aesthetic judgement.", "xrefs" : [ "https://royalsocietypublishing.org/doi/10.1098/rspb.2019.0165#d1e5237" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "Validation Bias" } ] } }, { "id" : "https://w3id.org/aio/Proportional_Hazards_Model", "lbl" : "Proportional Hazards Model", "type" : "CLASS", "meta" : { "definition" : { "val" : "A surival modeling method where the unique effect of a unit increase in a covariate is multiplicative with respect to the hazard rate.", "xrefs" : [ "https://en.wikipedia.org/wiki/Proportional_hazards_modelProportional Hazards Model" ] } } }, { "id" : "https://w3id.org/aio/RNN_Layer", "lbl" : "RNN Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Base class for recurrent layers.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/RNN" ] } } }, { "id" : "https://w3id.org/aio/Radial_Basis_Network", "lbl" : "Radial Basis Network", "type" : "CLASS", "meta" : { "definition" : { "val" : "Like recurrent neural networks (RNNs), transformers are designed to handle sequential input data, such as natural language, for tasks such as translation and text summarization. However, unlike RNNs, transformers do not necessarily process the data in order. Rather, the attention mechanism provides context for any position in the input sequence.", "xrefs" : [ "https://en.wikipedia.org/wiki/Radial_basis_function_network" ] }, "comments" : [ "Input, Hidden, Output" ], "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "RBFN" }, { "pred" : "hasExactSynonym", "val" : "RBN" }, { "pred" : "hasExactSynonym", "val" : "Radial Basis Function Network" } ] } }, { "id" : "https://w3id.org/aio/RandomBrightness_Layer", "lbl" : "RandomBrightness Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "A preprocessing layer which randomly adjusts brightness during training. This layer will randomly increase/reduce the brightness for the input RGB images. At inference time, the output will be identical to the input. Call the layer with training=True to adjust the brightness of the input. Note that different brightness adjustment factors will be apply to each the images in the batch.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/RandomBrightness" ] } } }, { "id" : "https://w3id.org/aio/RandomContrast_Layer", "lbl" : "RandomContrast Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "A preprocessing layer which randomly adjusts contrast during training. This layer will randomly adjust the contrast of an image or images by a random factor. Contrast is adjusted independently for each channel of each image during training. For each channel, this layer computes the mean of the image pixels in the channel and then adjusts each component x of each pixel to (x - mean) * contrast_factor + mean. Input pixel values can be of any range (e.g. [0., 1.) or [0, 255]) and in integer or floating point dtype. By default, the layer will output floats. 
The output value will be clipped to the range [0, 255], the valid range of RGB colors.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/RandomContrast" ] } } }, { "id" : "https://w3id.org/aio/RandomCrop_Layer", "lbl" : "RandomCrop Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "A preprocessing layer which randomly crops images during training. During training, this layer will randomly choose a location to crop images down to a target size. The layer will crop all the images in the same batch to the same cropping location. At inference time, and during training if an input image is smaller than the target size, the input will be resized and cropped so as to return the largest possible window in the image that matches the target aspect ratio. If you need to apply random cropping at inference time, set training to True when calling the layer. Input pixel values can be of any range (e.g. [0., 1.) or [0, 255]) and of integer or floating point dtype. By default, the layer will output floats.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/RandomCrop" ] } } }, { "id" : "https://w3id.org/aio/RandomFlip_Layer", "lbl" : "RandomFlip Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "A preprocessing layer which randomly flips images during training. This layer will flip the images horizontally and/or vertically based on the mode attribute. During inference time, the output will be identical to the input. Call the layer with training=True to flip the input. Input pixel values can be of any range (e.g. [0., 1.) or [0, 255]) and of integer or floating point dtype. By default, the layer will output floats.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/RandomFlip" ] } } }, { "id" : "https://w3id.org/aio/RandomHeight_Layer", "lbl" : "RandomHeight Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "A preprocessing layer which randomly varies image height during training. This layer adjusts the height of a batch of images by a random factor. The input should be a 3D (unbatched) or 4D (batched) tensor in the \"channels_last\" image data format. Input pixel values can be of any range (e.g. [0., 1.) or [0, 255]) and of integer or floating point dtype. By default, the layer will output floats. By default, this layer is inactive during inference.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/RandomHeight" ] } } }, { "id" : "https://w3id.org/aio/RandomRotation_Layer", "lbl" : "RandomRotation Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "A preprocessing layer which randomly rotates images during training.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/RandomRotation" ] } } }, { "id" : "https://w3id.org/aio/RandomTranslation_Layer", "lbl" : "RandomTranslation Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "A preprocessing layer which randomly translates images during training. This layer will apply random translations to each image during training, filling empty space according to fill_mode. Input pixel values can be of any range (e.g. [0., 1.) or [0, 255]) and of integer or floating point dtype.
By default, the layer will output floats.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/RandomTranslation" ] } } }, { "id" : "https://w3id.org/aio/RandomWidth_Layer", "lbl" : "RandomWidth Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "A preprocessing layer which randomly varies image width during training. This layer randomly adjusts the width of a batch of images by a random factor. The input should be a 3D (unbatched) or 4D (batched) tensor in the \"channels_last\" image data format. Input pixel values can be of any range (e.g. [0., 1.) or [0, 255]) and of integer or floating point dtype. By default, the layer will output floats. By default, this layer is inactive during inference.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/RandomWidth" ] } } }, { "id" : "https://w3id.org/aio/RandomZoom_Layer", "lbl" : "RandomZoom Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "A preprocessing layer which randomly zooms images during training. This layer will randomly zoom in or out on each axis of an image independently, filling empty space according to fill_mode. Input pixel values can be of any range (e.g. [0., 1.) or [0, 255]) and of integer or floating point dtype. By default, the layer will output floats.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/RandomZoom" ] } } }, { "id" : "https://w3id.org/aio/Random_Effects_Model", "lbl" : "Random Effects Model", "type" : "CLASS", "meta" : { "definition" : { "val" : "A statistical model where the model parameters are random variables.", "xrefs" : [ "https://en.wikipedia.org/wiki/Random_effects_model" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "REM" } ] } }, { "id" : "https://w3id.org/aio/Random_Forest", "lbl" : "Random Forest", "type" : "CLASS", "meta" : { "definition" : { "val" : "An ensemble learning method for classification, regression and other tasks that operates by constructing a multitude of decision trees at training time.", "xrefs" : [ "https://en.wikipedia.org/wiki/Random_forest" ] } } }, { "id" : "https://w3id.org/aio/Ranking_Bias", "lbl" : "Ranking Bias", "type" : "CLASS", "meta" : { "definition" : { "val" : "The idea that top-ranked results are the most relevant and important and will result in more clicks than other results.", "xrefs" : [ "https://doi.org/10.6028/NIST.SP.1270" ] } } }, { "id" : "https://w3id.org/aio/Rashomon_Effect_Bias", "lbl" : "Rashomon Effect Bias", "type" : "CLASS", "meta" : { "definition" : { "val" : "Refers to differences in perspective, memory and recall, interpretation, and reporting on the same event from multiple persons or witnesses.", "xrefs" : [ "https://doi.org/10.6028/NIST.SP.1270" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "Rashomon Effect" }, { "pred" : "hasExactSynonym", "val" : "Rashomon Principle" } ] } }, { "id" : "https://w3id.org/aio/ReLU_Function", "lbl" : "ReLU Function", "type" : "CLASS", "meta" : { "definition" : { "val" : "The ReLU activation function returns: max(x, 0), the element-wise maximum of 0 and the input tensor.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/activations/relu" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "ReLU" }, { "pred" : "hasExactSynonym", "val" : "Rectified Linear Unit" } ] } }, { "id" : "https://w3id.org/aio/ReLU_Layer", "lbl" : "ReLU Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Rectified Linear Unit activation function.
With default values, it returns element-wise max(x, 0).", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/ReLU" ] } } }, { "id" : "https://w3id.org/aio/Recurrent_Layer", "lbl" : "Recurrent Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "A layer of an RNN, composed of recurrent units, the number of which is the hidden size of the layer.", "xrefs" : [ "https://docs.nvidia.com/deepLearning/performance/dl-performance-recurrent/index.html#recurrent-layer" ] } } }, { "id" : "https://w3id.org/aio/Recurrent_Neural_Network", "lbl" : "Recurrent Neural Network", "type" : "CLASS", "meta" : { "definition" : { "val" : "A recurrent neural network (RNN) is a class of artificial neural networks where connections between nodes form a directed graph along a temporal sequence. This allows it to exhibit temporal dynamic behavior. Derived from feedforward neural networks, RNNs can use their internal state (memory) to process variable length sequences of inputs.", "xrefs" : [ "https://en.wikipedia.org/wiki/Recurrent_neural_network" ] }, "comments" : [ "Input, Memory Cell, Output" ], "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "RN" }, { "pred" : "hasExactSynonym", "val" : "RecNN" }, { "pred" : "hasExactSynonym", "val" : "Recurrent Network" } ] } }, { "id" : "https://w3id.org/aio/Recursive_Neural_Network", "lbl" : "Recursive Neural Network", "type" : "CLASS", "meta" : { "definition" : { "val" : "A recursive neural network is a kind of deep neural network created by applying the same set of weights recursively over a structured input, to produce a structured prediction over variable-size input structures, or a scalar prediction on it, by traversing a given structure in topological order. Recursive neural networks, sometimes abbreviated as RvNNs, have been successful, for instance, in learning sequence and tree structures in natural language processing, mainly phrase and sentence continuous representations based on word embedding.", "xrefs" : [ "https://en.wikipedia.org/wiki/Recursive_neural_network" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "RecuNN" }, { "pred" : "hasExactSynonym", "val" : "RvNN" } ] } }, { "id" : "https://w3id.org/aio/Regression_Analysis", "lbl" : "Regression Analysis", "type" : "CLASS", "meta" : { "definition" : { "val" : "A set of statistical processes for estimating the relationships between a dependent variable (often called the 'outcome' or 'response' variable, or a 'label' in machine learning parlance) and one or more independent variables (often called 'predictors', 'covariates', 'explanatory variables' or 'features').", "xrefs" : [ "https://en.wikipedia.org/wiki/Regression_analysis" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "Regression analysis" }, { "pred" : "hasExactSynonym", "val" : "Regression model" } ] } }, { "id" : "https://w3id.org/aio/Regularization_Layer", "lbl" : "Regularization Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Regularizers allow you to apply penalties on layer parameters or layer activity during optimization. These penalties are summed into the loss function that the network optimizes.
Regularization penalties are applied on a per-layer basis.", "xrefs" : [ "https://keras.io/api/layers/regularizers/" ] } } }, { "id" : "https://w3id.org/aio/Reinforcement_Learning", "lbl" : "Reinforcement Learning", "type" : "CLASS", "meta" : { "definition" : { "val" : "Methods that do not need labelled input/output pairs be presented, nor needing sub-optimal actions to be explicitly corrected. Instead they focus on finding a balance between exploration (of uncharted territory) and exploitation (of current knowledge).", "xrefs" : [ "https://en.wikipedia.org/wiki/Reinforcement_learning" ] } } }, { "id" : "https://w3id.org/aio/RepeatVector_Layer", "lbl" : "RepeatVector Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Repeats the input n times.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/RepeatVector" ] } } }, { "id" : "https://w3id.org/aio/Representation_Bias", "lbl" : "Representation Bias", "type" : "CLASS", "meta" : { "definition" : { "val" : "Arises due to non-random sampling of subgroups, causing trends estimated for one population to not be generalizable to data collected from a new population.", "xrefs" : [ "https://doi.org/10.6028/NIST.SP.1270" ] } } }, { "id" : "https://w3id.org/aio/Representation_Learning", "lbl" : "Representation Learning", "type" : "CLASS", "meta" : { "definition" : { "val" : "Methods that allow a system to discover the representations required for feature detection or classification from raw data.", "xrefs" : [ "https://en.wikipedia.org/wiki/Feature_learning" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "Feature Learning" } ] } }, { "id" : "https://w3id.org/aio/Rescaling_Layer", "lbl" : "Rescaling Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "A preprocessing layer which rescales input values to a new range.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/Rescaling" ] } } }, { "id" : "https://w3id.org/aio/Reshape_Layer", "lbl" : "Reshape Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Layer that reshapes inputs into the given shape.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/Reshape" ] } } }, { "id" : "https://w3id.org/aio/Reshaping_Layer", "lbl" : "Reshaping Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Reshape layers are used to change the shape of the input." } } }, { "id" : "https://w3id.org/aio/Residual_Neural_Network", "lbl" : "Residual Neural Network", "type" : "CLASS", "meta" : { "definition" : { "val" : "A residual neural network (ResNet) is an artificial neural network (ANN) of a kind that builds on constructs known from pyramidal cells in the cerebral cortex. Residual neural networks do this by utilizing skip connections, or shortcuts to jump over some layers. Typical ResNet models are implemented with double- or triple- layer skips that contain nonlinearities (ReLU) and batch normalization in between. An additional weight matrix may be used to learn the skip weights; these models are known as HighwayNets. Models with several parallel skips are referred to as DenseNets. 
In the context of residual neural networks, a non-residual network may be described as a 'plain network'.", "xrefs" : [ "https://en.wikipedia.org/wiki/Residual_neural_network" ] }, "comments" : [ "Input, Weight, BN, ReLU, Weight, BN, Addition, ReLU" ], "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "DRN" }, { "pred" : "hasExactSynonym", "val" : "Deep Residual Network" }, { "pred" : "hasExactSynonym", "val" : "ResNN" }, { "pred" : "hasExactSynonym", "val" : "ResNet" } ] } }, { "id" : "https://w3id.org/aio/Resizing_Layer", "lbl" : "Resizing Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "A preprocessing layer which resizes images. This layer resizes an image input to a target height and width. The input should be a 4D (batched) or 3D (unbatched) tensor in \"channels_last\" format. Input pixel values can be of any range (e.g. [0., 1.) or [0, 255]) and of integer or floating point dtype. By default, the layer will output floats. This layer can be called on tf.RaggedTensor batches of input images of distinct sizes, and will resize the outputs to dense tensors of uniform size.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/Resizing" ] } } }, { "id" : "https://w3id.org/aio/Restricted_Boltzmann_Machine", "lbl" : "Restricted Boltzmann Machine", "type" : "CLASS", "meta" : { "definition" : { "val" : "A restricted Boltzmann machine (RBM) is a generative stochastic artificial neural network that can learn a probability distribution over its set of inputs.", "xrefs" : [ "https://en.wikipedia.org/wiki/Restricted_Boltzmann_machine" ] }, "comments" : [ "Backfed Input, Probabilistic Hidden" ], "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "RBM" } ] } }, { "id" : "https://w3id.org/aio/Ridge_Regression", "lbl" : "Ridge Regression", "type" : "CLASS", "meta" : { "definition" : { "val" : "A method of estimating the coefficients of multiple-regression models in scenarios where the independent variables are highly correlated. It has been used in many fields including econometrics, chemistry, and engineering.", "xrefs" : [ "https://en.wikipedia.org/wiki/Ridge_regression" ] } } }, { "id" : "https://w3id.org/aio/SeLu_Function", "lbl" : "SELU Function", "type" : "CLASS", "meta" : { "definition" : { "val" : "The SELU activation function multiplies scale (> 1) with the output of the ELU function to ensure a slope larger than one for positive inputs.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/activations/selu" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "SELU" }, { "pred" : "hasExactSynonym", "val" : "Scaled Exponential Linear Unit" } ] } }, { "id" : "https://w3id.org/aio/Selection_And_Sampling_Bias", "lbl" : "Selection And Sampling Bias", "type" : "CLASS", "meta" : { "definition" : { "val" : "Bias introduced by the selection of individuals, groups, or data for analysis in such a way that proper randomization is not achieved, thereby failing to ensure that the sample obtained is representative of the population intended to be analyzed.", "xrefs" : [ "https://en.wikipedia.org/wiki/Selection_bias" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "Sampling Bias" }, { "pred" : "hasExactSynonym", "val" : "Selection Bias" }, { "pred" : "hasExactSynonym", "val" : "Selection Effect" } ] } }, { "id" : "https://w3id.org/aio/Selective_Adherence_Bias", "lbl" : "Selective Adherence Bias", "type" : "CLASS", "meta" : { "definition" : { "val" : "Decision-makers’ inclination to selectively adopt algorithmic advice when it matches
their pre-existing beliefs and stereotypes.", "xrefs" : [ "https://doi.org/10.6028/NIST.SP.1270" ] } } }, { "id" : "https://w3id.org/aio/Self-supervised_Learning", "lbl" : "Self-supervised Learning", "type" : "CLASS", "meta" : { "definition" : { "val" : "Regarded as an intermediate form between supervised and unsupervised learning.", "xrefs" : [ "https://en.wikipedia.org/wiki/Self-supervised_learning" ] } } }, { "id" : "https://w3id.org/aio/SeparableConvolution1D_Layer", "lbl" : "SeparableConvolution1D Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Depthwise separable 1D convolution. This layer performs a depthwise convolution that acts separately on channels, followed by a pointwise convolution that mixes channels. If use_bias is True and a bias initializer is provided, it adds a bias vector to the output. It then optionally applies an activation function to produce the final output.a", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/SeparableConv1D" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "SeparableConv1D Layer" } ] } }, { "id" : "https://w3id.org/aio/SeparableConvolution2D_Layer", "lbl" : "SeparableConvolution2D Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Depthwise separable 2D convolution. Separable convolutions consist of first performing a depthwise spatial convolution (which acts on each input channel separately) followed by a pointwise convolution which mixes the resulting output channels. The depth_multiplier argument controls how many output channels are generated per input channel in the depthwise step. Intuitively, separable convolutions can be understood as a way to factorize a convolution kernel into two smaller kernels, or as an extreme version of an Inception block.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/SeparableConv2D" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "SeparableConv2D Layer" } ] } }, { "id" : "https://w3id.org/aio/Sigmoid_Function", "lbl" : "Sigmoid Function", "type" : "CLASS", "meta" : { "definition" : { "val" : "Applies the sigmoid activation function sigmoid(x) = 1 / (1 + exp(-x)). For small values (<-5), sigmoid returns a value close to zero, and for large values (>5) the result of the function gets close to 1. Sigmoid is equivalent to a 2-element Softmax, where the second element is assumed to be zero. The sigmoid function always returns a value between 0 and 1.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/activations/sigmoid" ] } } }, { "id" : "https://w3id.org/aio/SimpleRNNCell_Layer", "lbl" : "SimpleRNNCell Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Cell class for SimpleRNN. 
This class processes one step within the whole time sequence input, whereas tf.keras.layer.SimpleRNN processes the whole sequence.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/SimpleRNNCell" ] } } }, { "id" : "https://w3id.org/aio/SimpleRNN_Layer", "lbl" : "SimpleRNN Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Fully-connected RNN where the output is to be fed back to input.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/SimpleRNN" ] } } }, { "id" : "https://w3id.org/aio/Societal_Bias", "lbl" : "Societal Bias", "type" : "CLASS", "meta" : { "definition" : { "val" : "Can be positive or negative, and take a number of different forms, but is typically characterized as being for or against groups or individuals based on social identities, demographic factors, or immutable physical characteristics. Societal or social biases are often stereotypes. Common examples of societal or social biases are based on concepts like race, ethnicity, gender, sexual orientation, socioeconomic status, education, and more. Societal bias is often recognized and discussed in the context of NLP (Natural Language Processing) models.", "xrefs" : [ "https://doi.org/10.6028/NIST.SP.1270" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "Social Bias" } ] } }, { "id" : "https://w3id.org/aio/Softmax_Function", "lbl" : "Softmax Function", "type" : "CLASS", "meta" : { "definition" : { "val" : "The elements of the output vector are in range (0, 1) and sum to 1. Each vector is handled independently. The axis argument sets which axis of the input the function is applied along. Softmax is often used as the activation for the last layer of a classification network because the result could be interpreted as a probability distribution. The softmax of each vector x is computed as exp(x) / tf.reduce_sum(exp(x)). The input values in are the log-odds of the resulting probability.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/activations/softmax" ] } } }, { "id" : "https://w3id.org/aio/Softmax_Layer", "lbl" : "Softmax Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Softmax activation function.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/Softmax" ] } } }, { "id" : "https://w3id.org/aio/Softplus_Function", "lbl" : "Softplus Function", "type" : "CLASS", "meta" : { "definition" : { "val" : "softplus(x) = log(exp(x) + 1)", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/activations/softplus" ] } } }, { "id" : "https://w3id.org/aio/Softsign_Function", "lbl" : "Softsign Function", "type" : "CLASS", "meta" : { "definition" : { "val" : "softsign(x) = x / (abs(x) + 1)", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/activations/softsign" ] } } }, { "id" : "https://w3id.org/aio/Sparse_AE", "lbl" : "Sparse AE", "type" : "CLASS", "meta" : { "definition" : { "val" : "Sparse autoencoders may include more (rather than fewer) hidden units than inputs, but only a small number of the hidden units are allowed to be active at the same time (thus, sparse). This constraint forces the model to respond to the unique statistical features of the training data. 
(https://en.wikipedia.org/wiki/Autoencoder)" }, "comments" : [ "Input, Hidden, Matched Output-Input" ], "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "SAE" } ] } }, { "id" : "https://w3id.org/aio/Sparse_Learning", "lbl" : "Sparse Learning", "type" : "CLASS", "meta" : { "definition" : { "val" : "Methods which aim to find sparse representations of the input data in the form of a linear combination of basic elements as well as those basic elements themselves.", "xrefs" : [ "https://en.wikipedia.org/wiki/Sparse_dictionary_learning" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "Sparse coding" }, { "pred" : "hasExactSynonym", "val" : "Sparse dictionary Learning" } ] } }, { "id" : "https://w3id.org/aio/SpatialDropout1D_Layer", "lbl" : "SpatialDropout1D Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Spatial 1D version of Dropout. This version performs the same function as Dropout, however, it drops entire 1D feature maps instead of individual elements. If adjacent frames within feature maps are strongly correlated (as is normally the case in early convolution layers) then regular dropout will not regularize the activations and will otherwise just result in an effective Learning rate decrease. In this case, SpatialDropout1D will help promote independence between feature maps and should be used instead.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/SpatialDropout1D" ] } } }, { "id" : "https://w3id.org/aio/SpatialDropout2D_Layer", "lbl" : "SpatialDropout2D Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Spatial 2D version of Dropout. This version performs the same function as Dropout, however, it drops entire 2D feature maps instead of individual elements. If adjacent pixels within feature maps are strongly correlated (as is normally the case in early convolution layers) then regular dropout will not regularize the activations and will otherwise just result in an effective Learning rate decrease. In this case, SpatialDropout2D will help promote independence between feature maps and should be used instead.a", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/SpatialDropout2D" ] } } }, { "id" : "https://w3id.org/aio/SpatialDropout3D_Layer", "lbl" : "SpatialDropout3D Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Spatial 3D version of Dropout. This version performs the same function as Dropout, however, it drops entire 3D feature maps instead of individual elements. If adjacent voxels within feature maps are strongly correlated (as is normally the case in early convolution layers) then regular dropout will not regularize the activations and will otherwise just result in an effective Learning rate decrease. In this case, SpatialDropout3D will help promote independence between feature maps and should be used instead.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/SpatialDropout3D" ] } } }, { "id" : "https://w3id.org/aio/Spatial_Regression", "lbl" : "Spatial Regression", "type" : "CLASS", "meta" : { "definition" : { "val" : "Regression method used to model spatial relationships.", "xrefs" : [ "https://gisgeography.com/spatial-regression-models-arcgis/" ] } } }, { "id" : "https://w3id.org/aio/StackedRNNCells_Layer", "lbl" : "StackedRNNCells Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Wrapper allowing a stack of RNN cells to behave as a single cell. 
Used to implement efficient stacked RNNs.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/StackedRNNCells" ] } } }, { "id" : "https://w3id.org/aio/Streetlight_Effect_Bias", "lbl" : "Streetlight Effect Bias", "type" : "CLASS", "meta" : { "definition" : { "val" : "A bias whereby people tend to search only where it is easiest to look.", "xrefs" : [ "https://doi.org/10.6028/NIST.SP.1270" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "Streetlight Effect" } ] } }, { "id" : "https://w3id.org/aio/StringLookup_Layer", "lbl" : "StringLookup Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "A preprocessing layer which maps string features to integer indices.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/StringLookup" ] } } }, { "id" : "https://w3id.org/aio/Subtract_Layer", "lbl" : "Subtract Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Layer that subtracts two inputs. It takes as input a list of tensors of size 2, both of the same shape, and returns a single tensor, (inputs[0] - inputs[1]), also of the same shape.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/Subtract" ] } } }, { "id" : "https://w3id.org/aio/Sunk_Cost_Fallacy_Bias", "lbl" : "Sunk Cost Fallacy Bias", "type" : "CLASS", "meta" : { "definition" : { "val" : "A human tendency where people opt to continue with an endeavor or behavior due to previously spent or invested resources, such as money, time, and effort, regardless of whether costs outweigh benefits. For example, in AI, the sunk cost fallacy could lead development teams and organizations to feel that because they have already invested so much time and money into a particular AI application, they must pursue it to market rather than deciding to end the effort, even in the face of significant technical debt and/or ethical debt.", "xrefs" : [ "https://doi.org/10.6028/NIST.SP.1270" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "Sunk Cost Fallacy" } ] } }, { "id" : "https://w3id.org/aio/Supervised_Biclustering", "lbl" : "Supervised Biclustering", "type" : "CLASS", "meta" : { "definition" : { "val" : "Methods that simultaneously cluster the rows and columns of a labeled matrix, also taking into account the data label contributions to cluster coherence.", "xrefs" : [ "https://en.wikipedia.org/wiki/Biclustering" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "Supervised Block Clustering" }, { "pred" : "hasExactSynonym", "val" : "Supervised Co-clustering" }, { "pred" : "hasExactSynonym", "val" : "Supervised Joint Clustering" }, { "pred" : "hasExactSynonym", "val" : "Supervised Two-mode Clustering" }, { "pred" : "hasExactSynonym", "val" : "Supervised Two-way Clustering" } ] } }, { "id" : "https://w3id.org/aio/Supervised_Clustering", "lbl" : "Supervised Clustering", "type" : "CLASS", "meta" : { "definition" : { "val" : "Methods that group a set of labeled objects in such a way that objects in the same group (called a cluster) are more similarly labeled (in some sense) relative to those in other groups (clusters).", "xrefs" : [ "https://en.wikipedia.org/wiki/Cluster_analysis" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "Cluster analysis" } ] } }, { "id" : "https://w3id.org/aio/Supervised_Learning", "lbl" : "Supervised Learning", "type" : "CLASS", "meta" : { "definition" : { "val" : "Methods that can learn a function that maps an input to an output based on example input-output pairs.", "xrefs" : [ 
"https://en.wikipedia.org/wiki/Supervised_learning" ] } } }, { "id" : "https://w3id.org/aio/Support_Vector_Machine", "lbl" : "Support Vector Machine", "type" : "CLASS", "meta" : { "definition" : { "val" : "In machine Learning, support-vector machines (SVMs, also support-vector networks) are supervised Learning models with associated Learning algorithms that analyze data for classification and regression analysis. Developed at AT&T Bell Laboratories by Vladimir Vapnik with colleagues (Boser et al., 1992, Guyon et al., 1993, Vapnik et al., 1997) SVMs are one of the most robust prediction methods, being based on statistical Learning frameworks or VC theory proposed by Vapnik (1982, 1995) and Chervonenkis (1974). Given a set of training examples, each marked as belonging to one of two categories, an SVM training algorithm builds a model that assigns new examples to one category or the other, making it a non-probabilistic binary linear classifier (although methods such as Platt scaling exist to use SVM in a probabilistic classification setting). SVM maps training examples to points in space so as to maximise the width of the gap between the two categories. New examples are then mapped into that same space and predicted to belong to a category based on which side of the gap they fall.", "xrefs" : [ "https://en.wikipedia.org/wiki/Support-vector_machine" ] }, "comments" : [ "Input, Hidden, Output" ], "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "SVM" }, { "pred" : "hasExactSynonym", "val" : "SVN" }, { "pred" : "hasExactSynonym", "val" : "Supper Vector Network" } ] } }, { "id" : "https://w3id.org/aio/Survival_Analysis", "lbl" : "Survival Analysis", "type" : "CLASS", "meta" : { "definition" : { "val" : "Methods for nalyzing the expected duration of time until one event occurs, such as death in biological organisms and failure in mechanical systems.", "xrefs" : [ "https://en.wikipedia.org/wiki/Survival_analysis" ] } } }, { "id" : "https://w3id.org/aio/Survivorship_Bias", "lbl" : "Survivorship Bias", "type" : "CLASS", "meta" : { "definition" : { "val" : "Tendency for people to focus on the items, observations, or people that “survive” or make it past a selection process, while overlooking those that did not.", "xrefs" : [ "https://doi.org/10.6028/NIST.SP.1270" ] } } }, { "id" : "https://w3id.org/aio/Swish_Function", "lbl" : "Swish Function", "type" : "CLASS", "meta" : { "definition" : { "val" : "x*sigmoid(x). 
It is a smooth, non-monotonic function that consistently matches or outperforms ReLU on deep networks, it is unbounded above and bounded below.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/activations/swish" ] } } }, { "id" : "https://w3id.org/aio/Symmetrically_Connected_Network", "lbl" : "Symmetrically Connected Network", "type" : "CLASS", "meta" : { "definition" : { "val" : "Like recurrent networks, but the connections between units are symmetrical (they have the same weight in both directions).", "xrefs" : [ "https://ieeexplore.ieee.org/document/287176" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "SCN" } ] } }, { "id" : "https://w3id.org/aio/SyncBatchNorm_Layer", "lbl" : "SyncBatchNorm Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Applies Batch Normalization over a N-Dimensional input (a mini-batch of [N-2]D inputs with additional channel dimension) as described in the paper Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift .", "xrefs" : [ "https://pytorch.org/docs/stable/nn.html#normalization-layers" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "SyncBatchNorm" }, { "pred" : "hasExactSynonym", "val" : "nn.SyncBatchNorm" } ] } }, { "id" : "https://w3id.org/aio/Systemic_Bias", "lbl" : "Systemic Bias", "type" : "CLASS", "meta" : { "definition" : { "val" : "Biases that result from procedures and practices of particular institutions that operate in ways which result in certain social groups being advantaged or favored and others being disadvantaged or devalued.", "xrefs" : [ "https://doi.org/10.6028/NIST.SP.1270" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "Institutional Bias" }, { "pred" : "hasExactSynonym", "val" : "Societal Bias" } ] } }, { "id" : "https://w3id.org/aio/Tanh_Function", "lbl" : "Tanh Function", "type" : "CLASS", "meta" : { "definition" : { "val" : "Hyperbolic tangent activation function.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/activations/tanh" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "hyperbolic tangent" } ] } }, { "id" : "https://w3id.org/aio/Temporal_Bias", "lbl" : "Temporal Bias", "type" : "CLASS", "meta" : { "definition" : { "val" : "Bias that arises from differences in populations and behaviors over time.", "xrefs" : [ "https://doi.org/10.6028/NIST.SP.1270" ] } } }, { "id" : "https://w3id.org/aio/TextVectorization_Layer", "lbl" : "TextVectorization Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "A preprocessing layer which maps text features to integer sequences.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/TextVectorization" ] } } }, { "id" : "https://w3id.org/aio/Text_Preprocessing_Layer", "lbl" : "Text Preprocessing Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "A layer that performs text data preprocessing operations.", "xrefs" : [ "https://keras.io/guides/preprocessing_layers/" ] } } }, { "id" : "https://w3id.org/aio/ThresholdedReLU_Layer", "lbl" : "ThresholdedReLU Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Thresholded Rectified Linear Unit.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/ThresholdedReLU" ] } } }, { "id" : "https://w3id.org/aio/TimeDistributed_Layer", "lbl" : "TimeDistributed Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "This wrapper allows to apply a layer to every temporal slice of an input. 
Every input should be at least 3D, and the dimension of index one of the first input will be considered to be the temporal dimension. Consider a batch of 32 video samples, where each sample is a 128x128 RGB image with channels_last data format, across 10 timesteps. The batch input shape is (32, 10, 128, 128, 3). You can then use TimeDistributed to apply the same Conv2D layer to each of the 10 timesteps, independently:", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/TimeDistributed" ] } } }, { "id" : "https://w3id.org/aio/Time_Series_Analysis", "lbl" : "Time Series Analysis", "type" : "CLASS", "meta" : { "definition" : { "val" : "Methods for analyzing time series data in order to extract meaningful statistics and other characteristics of the data.", "xrefs" : [ "https://en.wikipedia.org/wiki/Time_series" ] } } }, { "id" : "https://w3id.org/aio/Time_Series_Forecasting", "lbl" : "Time Series Forecasting", "type" : "CLASS", "meta" : { "definition" : { "val" : "Methods that predict future values based on previously observed values.", "xrefs" : [ "https://en.wikipedia.org/wiki/Time_series" ] } } }, { "id" : "https://w3id.org/aio/Transfer_Learning", "lbl" : "Transfer Learning", "type" : "CLASS", "meta" : { "definition" : { "val" : "Methods which can reuse or transfer information from previously learned tasks for the Learning of new tasks.", "xrefs" : [ "https://en.wikipedia.org/wiki/Transfer_learning" ] } } }, { "id" : "https://w3id.org/aio/Transformer_Network", "lbl" : "Transformer Network", "type" : "CLASS", "meta" : { "definition" : { "val" : "A transformer is a deep Learning model that adopts the mechanism of attention, differentially weighing the significance of each part of the input data. It is used primarily in the field of natural language processing (NLP) and in computer vision (CV). (https://en.wikipedia.org/wiki/Transformer_(machine_Learning_model))", "xrefs" : [ "https://en.wikipedia.org/wiki/Transformer_(machine_Learning_model)" ] } } }, { "id" : "https://w3id.org/aio/Uncertainty_Bias", "lbl" : "Uncertainty Bias", "type" : "CLASS", "meta" : { "definition" : { "val" : "Arises when predictive algorithms favor groups that are better represented in the training data, since there will be less uncertainty associated with those predictions.", "xrefs" : [ "https://doi.org/10.6028/NIST.SP.1270" ] } } }, { "id" : "https://w3id.org/aio/UnitNormalization_Layer", "lbl" : "UnitNormalization Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Unit normalization layer. 
Normalize a batch of inputs so that each input in the batch has a L2 norm equal to 1 (across the axes specified in axis).", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/UnitNormalization" ] } } }, { "id" : "https://w3id.org/aio/Unsupervised_Biclustering", "lbl" : "Unsupervised Biclustering", "type" : "CLASS", "meta" : { "definition" : { "val" : "Methods that simultaneously cluster the rows and columns of an unlabeled input matrix.", "xrefs" : [ "https://en.wikipedia.org/wiki/Biclustering" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "Block Clustering" }, { "pred" : "hasExactSynonym", "val" : "Co-clustering" }, { "pred" : "hasExactSynonym", "val" : "Joint Clustering" }, { "pred" : "hasExactSynonym", "val" : "Two-mode Clustering" }, { "pred" : "hasExactSynonym", "val" : "Two-way Clustering" } ] } }, { "id" : "https://w3id.org/aio/Unsupervised_Clustering", "lbl" : "Unsupervised Clustering", "type" : "CLASS", "meta" : { "definition" : { "val" : "Methods that group a set of objects in such a way that objects without labels in the same group (called a cluster) are more similar (in some sense) to each other than to those in other groups (clusters).", "xrefs" : [ "https://en.wikipedia.org/wiki/Cluster_analysis" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "Cluster analysis" } ] } }, { "id" : "https://w3id.org/aio/Unsupervised_Learning", "lbl" : "Unsupervised Learning", "type" : "CLASS", "meta" : { "definition" : { "val" : "Algorithms that learns patterns from unlabeled data.", "xrefs" : [ "https://en.wikipedia.org/wiki/Unsupervised_learning" ] } } }, { "id" : "https://w3id.org/aio/Unsupervised_Pretrained_Network", "lbl" : "Unsupervised Pretrained Network", "type" : "CLASS", "meta" : { "definition" : { "val" : "Unsupervised pre-training initializes a discriminative neural net from one which was trained using an unsupervised criterion, such as a deep belief network or a deep autoencoder. This method can sometimes help with both the optimization and the overfitting issues.", "xrefs" : [ "https://metacademy.org/graphs/concepts/unsupervised_pre_training#:~:text=Unsupervised%20pre%2Dtraining%20initializes%20a,optimization%20and%20the%20overfitting%20issues" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "UPN" } ] } }, { "id" : "https://w3id.org/aio/UpSampling1D_Layer", "lbl" : "UpSampling1D Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Upsampling layer for 1D inputs. Repeats each temporal step size times along the time axis.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/UpSampling1D" ] } } }, { "id" : "https://w3id.org/aio/UpSampling2D_Layer", "lbl" : "UpSampling2D Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Upsampling layer for 2D inputs. 
Repeats the rows and columns of the data by size[0] and size[1] respectively.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/UpSampling2D" ] } } }, { "id" : "https://w3id.org/aio/UpSampling3D_Layer", "lbl" : "UpSampling3D Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Upsampling layer for 3D inputs.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/UpSampling3D" ] } } }, { "id" : "https://w3id.org/aio/Use_And_Interpretation_Bias", "lbl" : "Use And Interpretation Bias", "type" : "CLASS", "meta" : { "definition" : { "val" : "An information-processing bias, the tendency to inappropriately analyze ambiguous stimuli, scenarios and events.", "xrefs" : [ "https://en.wikipedia.org/wiki/Interpretive_bias" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "Interpretive Bias" } ] } }, { "id" : "https://w3id.org/aio/User_Interaction_Bias", "lbl" : "User Interaction Bias", "type" : "CLASS", "meta" : { "definition" : { "val" : "Arises when a user imposes their own self-selected biases and behavior during interaction with data, output, results, etc.", "xrefs" : [ "https://doi.org/10.6028/NIST.SP.1270" ] } } }, { "id" : "https://w3id.org/aio/Variational_Auto_Encoder", "lbl" : "Variational Auto Encoder", "type" : "CLASS", "meta" : { "definition" : { "val" : "Variational autoencoders are meant to compress the input information into a constrained multivariate latent distribution (encoding) to reconstruct it as accurately as possible (decoding). (https://en.wikipedia.org/wiki/Variational_autoencoder)" }, "comments" : [ "Input, Probabilistic Hidden, Matched Output-Input" ], "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "VAE" } ] } }, { "id" : "https://w3id.org/aio/Wrapper_Layer", "lbl" : "Wrapper Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Abstract wrapper base class. Wrappers take another layer and augment it in various ways. Do not use this class as a layer, it is only an abstract base class. Two usable wrappers are the TimeDistributed and Bidirectional wrappers.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/Wrapper" ] } } }, { "id" : "https://w3id.org/aio/Zero-shot_Learning", "lbl" : "Zero-shot Learning", "type" : "CLASS", "meta" : { "definition" : { "val" : "Methods where at test time, a learner observes samples from classes, which were not observed during training, and needs to predict the class that they belong to.", "xrefs" : [ "https://en.wikipedia.org/wiki/Zero-shot_learning" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "ZSL" } ] } }, { "id" : "https://w3id.org/aio/ZeroPadding1D_Layer", "lbl" : "ZeroPadding1D Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Zero-padding layer for 1D input (e.g. temporal sequence).", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/ZeroPadding1D" ] } } }, { "id" : "https://w3id.org/aio/ZeroPadding2D_Layer", "lbl" : "ZeroPadding2D Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Zero-padding layer for 2D input (e.g. picture). 
This layer can add rows and columns of zeros at the top, bottom, left and right side of an image tensor.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/ZeroPadding2D" ] } } }, { "id" : "https://w3id.org/aio/ZeroPadding3D_Layer", "lbl" : "ZeroPadding3D Layer", "type" : "CLASS", "meta" : { "definition" : { "val" : "Zero-padding layer for 3D data (spatial or spatio-temporal).", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/layers/ZeroPadding3D" ] } } }, { "id" : "https://w3id.org/aio/eLu_Function", "lbl" : "ELU Function", "type" : "CLASS", "meta" : { "definition" : { "val" : "The exponential linear unit (ELU) with alpha > 0 is: x if x > 0 and alpha * (exp(x) - 1) if x < 0 The ELU hyperparameter alpha controls the value to which an ELU saturates for negative net inputs. ELUs diminish the vanishing gradient effect. ELUs have negative values which pushes the mean of the activations closer to zero. Mean activations that are closer to zero enable faster Learning as they bring the gradient closer to the natural gradient. ELUs saturate to a negative value when the argument gets smaller. Saturation means a small derivative which decreases the variation and the information that is propagated to the next layer.", "xrefs" : [ "https://www.tensorflow.org/api_docs/python/tf/keras/activations/elu" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "ELU" }, { "pred" : "hasExactSynonym", "val" : "Exponential Linear Unit" } ] } }, { "id" : "https://w3id.org/aio/node2vec-CBOW", "lbl" : "node2vec-CBOW", "type" : "CLASS", "meta" : { "definition" : { "val" : "In the continuous bag-of-words architecture, the model predicts the current node from a window of surrounding context nodes. The order of context nodes does not influence prediction (bag-of-words assumption).", "xrefs" : [ "https://en.wikipedia.org/wiki/Word2vec" ] }, "comments" : [ "Input, Hidden, Output" ], "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "N2V-CBOW" }, { "pred" : "hasRelatedSynonym", "val" : "CBOW" } ] } }, { "id" : "https://w3id.org/aio/node2vec-SkipGram", "lbl" : "node2vec-SkipGram", "type" : "CLASS", "meta" : { "definition" : { "val" : "In the continuous skip-gram architecture, the model uses the current node to predict the surrounding window of context nodes. The skip-gram architecture weighs nearby context nodes more heavily than more distant context nodes. (https://en.wikipedia.org/wiki/Word2vec)", "xrefs" : [ "https://en.wikipedia.org/wiki/Word2vec" ] }, "comments" : [ "Input, Hidden, Output" ], "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "N2V-SkipGram" }, { "pred" : "hasRelatedSynonym", "val" : "SkipGram" } ] } }, { "id" : "https://w3id.org/aio/t-Distributed_Stochastic_Neighbor_embedding", "lbl" : "t-Distributed Stochastic Neighbor embedding", "type" : "CLASS", "meta" : { "definition" : { "val" : "A statistical method for visualizing high-dimensional data by giving each datapoint a location in a two or three-dimensional map.", "xrefs" : [ "https://en.wikipedia.org/wiki/T-distributed_stochastic_neighbor_embedding" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "t-SNE" }, { "pred" : "hasExactSynonym", "val" : "tSNE" } ] } }, { "id" : "https://w3id.org/aio/word2vec-CBOW", "lbl" : "word2vec-CBOW", "type" : "CLASS", "meta" : { "definition" : { "val" : "In the continuous bag-of-words architecture, the model predicts the current word from a window of surrounding context words. The order of context words does not influence prediction (bag-of-words assumption). 
(https://en.wikipedia.org/wiki/Word2vec)", "xrefs" : [ "https://en.wikipedia.org/wiki/Word2vec" ] }, "comments" : [ "Input, Hidden, Output" ], "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "W2V-CBOW" }, { "pred" : "hasRelatedSynonym", "val" : "CBOW" } ] } }, { "id" : "https://w3id.org/aio/word2vec-SkipGram", "lbl" : "word2vec-SkipGram", "type" : "CLASS", "meta" : { "definition" : { "val" : "In the continuous skip-gram architecture, the model uses the current word to predict the surrounding window of context words. The skip-gram architecture weighs nearby context words more heavily than more distant context words.", "xrefs" : [ "https://en.wikipedia.org/wiki/Word2vec" ] }, "comments" : [ "Input, Hidden, Output" ], "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "W2V-SkipGram" }, { "pred" : "hasRelatedSynonym", "val" : "SkipGram" } ] } }, { "id" : "https://w3id.org/aio/Simpon's_Paradox_Bias", "lbl" : "Simpson's Paradox Bias", "type" : "CLASS", "meta" : { "definition" : { "val" : "A statistical phenomenon where the marginal association between two categorical variables is qualitatively different from the partial association between the same two variables after controlling for one or more other variables. For example, the statistical association or correlation that has been detected between two variables for an entire population disappears or reverses when the population is divided into subgroups.", "xrefs" : [ "https://doi.org/10.6028/NIST.SP.1270" ] }, "synonyms" : [ { "pred" : "hasExactSynonym", "val" : "Simpson's Paradox" } ] } } ], "edges" : [ { "sub" : "https://w3id.org/aio/AbstractRNNCell", "pred" : "is_a", "obj" : "https://w3id.org/aio/Layer" }, { "sub" : "https://w3id.org/aio/Activation_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Layer" }, { "sub" : "https://w3id.org/aio/Active_Learning", "pred" : "is_a", "obj" : "https://w3id.org/aio/Machine_Learning" }, { "sub" : "https://w3id.org/aio/ActivityRegularization_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Regularization_Layer" }, { "sub" : "https://w3id.org/aio/Activity_Bias", "pred" : "is_a", "obj" : "https://w3id.org/aio/Use_And_Interpretation_Bias" }, { "sub" : "https://w3id.org/aio/AdaptiveAvgPool1D_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Pooling_Layer" }, { "sub" : "https://w3id.org/aio/AdaptiveAvgPool2D_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Pooling_Layer" }, { "sub" : "https://w3id.org/aio/AdaptiveAvgPool3D_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Pooling_Layer" }, { "sub" : "https://w3id.org/aio/AdaptiveMaxPool1D_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Pooling_Layer" }, { "sub" : "https://w3id.org/aio/AdaptiveMaxPool2D_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Pooling_Layer" }, { "sub" : "https://w3id.org/aio/AdaptiveMaxPool3D_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Pooling_Layer" }, { "sub" : "https://w3id.org/aio/Add_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Merging_Layer" }, { "sub" : "https://w3id.org/aio/AdditiveAttention_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Attention_Layer" }, { "sub" : "https://w3id.org/aio/AlphaDropout_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Regularization_Layer" }, { "sub" : "https://w3id.org/aio/Amplification_Bias", "pred" : "is_a", "obj" : "https://w3id.org/aio/Processing_Bias" }, { "sub" : "https://w3id.org/aio/Anchoring_Bias", "pred" : "is_a", "obj" : "https://w3id.org/aio/Individual_Bias" }, { "sub" : 
"https://w3id.org/aio/Annotator_Reporting_Bias", "pred" : "is_a", "obj" : "https://w3id.org/aio/Individual_Bias" }, { "sub" : "https://w3id.org/aio/Artificial_Neural_Network", "pred" : "is_a", "obj" : "https://w3id.org/aio/Network" }, { "sub" : "https://w3id.org/aio/Association_Rule_Learning", "pred" : "is_a", "obj" : "https://w3id.org/aio/Supervised_Learning" }, { "sub" : "https://w3id.org/aio/Attention_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Layer" }, { "sub" : "https://w3id.org/aio/Auto_Encoder_Network", "pred" : "is_a", "obj" : "https://w3id.org/aio/UPN" }, { "sub" : "https://w3id.org/aio/Automation_Complacency_Bias", "pred" : "is_a", "obj" : "https://w3id.org/aio/Individual_Bias" }, { "sub" : "https://w3id.org/aio/Availability_Heuristic_Bias", "pred" : "is_a", "obj" : "https://w3id.org/aio/Individual_Bias" }, { "sub" : "https://w3id.org/aio/AveragePooling1D_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Pooling_Layer" }, { "sub" : "https://w3id.org/aio/AveragePooling2D_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Pooling_Layer" }, { "sub" : "https://w3id.org/aio/AveragePooling3D_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Pooling_Layer" }, { "sub" : "https://w3id.org/aio/Average_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Merging_Layer" }, { "sub" : "https://w3id.org/aio/AvgPool1D_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Pooling_Layer" }, { "sub" : "https://w3id.org/aio/AvgPool2D_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Pooling_Layer" }, { "sub" : "https://w3id.org/aio/AvgPool3D_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Pooling_Layer" }, { "sub" : "https://w3id.org/aio/BatchNorm1D_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/BatchNormalization_Layer" }, { "sub" : "https://w3id.org/aio/BatchNorm2D_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/BatchNormalization_Layer" }, { "sub" : "https://w3id.org/aio/BatchNorm3D_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/BatchNormalization_Layer" }, { "sub" : "https://w3id.org/aio/BatchNormalization_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Normalization_Layer" }, { "sub" : "https://w3id.org/aio/Bayesian_Network", "pred" : "is_a", "obj" : "https://w3id.org/aio/Network" }, { "sub" : "https://w3id.org/aio/Behavioral_Bias", "pred" : "is_a", "obj" : "https://w3id.org/aio/Individual_Bias" }, { "sub" : "https://w3id.org/aio/Biclustering", "pred" : "is_a", "obj" : "https://w3id.org/aio/Machine_Learning" }, { "sub" : "https://w3id.org/aio/Bidirectional_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Recurrent_Layer" }, { "sub" : "https://w3id.org/aio/Binary_Classification", "pred" : "is_a", "obj" : "https://w3id.org/aio/Classification" }, { "sub" : "https://w3id.org/aio/Boltzmann_Machine_Network", "pred" : "is_a", "obj" : "https://w3id.org/aio/SCN" }, { "sub" : "https://w3id.org/aio/Categorical_Features_Preprocessing_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Layer" }, { "sub" : "https://w3id.org/aio/CategoryEncoding_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Categorical_Features_Preprocessing_Layer" }, { "sub" : "https://w3id.org/aio/Causal_Graphical_Model", "pred" : "is_a", "obj" : "https://w3id.org/aio/Probabilistic_Graphical_Model" }, { "sub" : "https://w3id.org/aio/CenterCrop_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Image_Preprocessing_Layer" }, { "sub" : "https://w3id.org/aio/Classification", "pred" : "is_a", "obj" : "https://w3id.org/aio/Supervised_Learning" }, { "sub" 
: "https://w3id.org/aio/Clustering", "pred" : "is_a", "obj" : "https://w3id.org/aio/Machine_Learning" }, { "sub" : "https://w3id.org/aio/Cognitive_Bias", "pred" : "is_a", "obj" : "https://w3id.org/aio/Individual_Bias" }, { "sub" : "https://w3id.org/aio/Computational_Bias", "pred" : "is_a", "obj" : "https://w3id.org/aio/Bias" }, { "sub" : "https://w3id.org/aio/Concatenate_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Merging_Layer" }, { "sub" : "https://w3id.org/aio/Concept_Drift_Bias", "pred" : "is_a", "obj" : "https://w3id.org/aio/Use_And_Interpretation_Bias" }, { "sub" : "https://w3id.org/aio/Confirmation_Bias", "pred" : "is_a", "obj" : "https://w3id.org/aio/Individual_Bias" }, { "sub" : "https://w3id.org/aio/Consumer_Bias", "pred" : "is_a", "obj" : "https://w3id.org/aio/Individual_Bias" }, { "sub" : "https://w3id.org/aio/Content_Production_Bias", "pred" : "is_a", "obj" : "https://w3id.org/aio/Use_And_Interpretation_Bias" }, { "sub" : "https://w3id.org/aio/Continual_Learning", "pred" : "is_a", "obj" : "https://w3id.org/aio/DNN" }, { "sub" : "https://w3id.org/aio/Contrastive_Learning", "pred" : "is_a", "obj" : "https://w3id.org/aio/DNN" }, { "sub" : "https://w3id.org/aio/ConvLSTM1D_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Convolutional_Layer" }, { "sub" : "https://w3id.org/aio/ConvLSTM2D_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Convolutional_Layer" }, { "sub" : "https://w3id.org/aio/ConvLSTM3D_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Convolutional_Layer" }, { "sub" : "https://w3id.org/aio/Convolution1DTranspose_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Layer" }, { "sub" : "https://w3id.org/aio/Convolution1D_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Layer" }, { "sub" : "https://w3id.org/aio/Convolution2DTranspose_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Layer" }, { "sub" : "https://w3id.org/aio/Convolution2D_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Layer" }, { "sub" : "https://w3id.org/aio/Convolution3DTranspose_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Layer" }, { "sub" : "https://w3id.org/aio/Convolution3D_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Layer" }, { "sub" : "https://w3id.org/aio/Convolutional_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Layer" }, { "sub" : "https://w3id.org/aio/Cropping1D_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Reshaping_Layer" }, { "sub" : "https://w3id.org/aio/Cropping2D_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Layer" }, { "sub" : "https://w3id.org/aio/Cropping3D_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Layer" }, { "sub" : "https://w3id.org/aio/Data_Dredging_Bias", "pred" : "is_a", "obj" : "https://w3id.org/aio/Use_And_Interpretation_Bias" }, { "sub" : "https://w3id.org/aio/Data_Generation_Bias", "pred" : "is_a", "obj" : "https://w3id.org/aio/Selection_And_Sampling_Bias" }, { "sub" : "https://w3id.org/aio/Data_Imputation", "pred" : "is_a", "obj" : "https://w3id.org/aio/Machine_Learning" }, { "sub" : "https://w3id.org/aio/Decision_Tree", "pred" : "is_a", "obj" : "https://w3id.org/aio/Classification" }, { "sub" : "https://w3id.org/aio/Decoder_LLM", "pred" : "is_a", "obj" : "https://w3id.org/aio/LLM" }, { "sub" : "https://w3id.org/aio/Deconvolutional_Network", "pred" : "is_a", "obj" : "https://w3id.org/aio/DNN" }, { "sub" : "https://w3id.org/aio/Deep_Active_Learning", "pred" : "is_a", "obj" : "https://w3id.org/aio/DNN" }, { "sub" : "https://w3id.org/aio/Deep_Belief_Network", "pred" 
: "is_a", "obj" : "https://w3id.org/aio/UPN" }, { "sub" : "https://w3id.org/aio/Deep_Convolutional_Inverse_Graphics_Network", "pred" : "is_a", "obj" : "https://w3id.org/aio/AE" }, { "sub" : "https://w3id.org/aio/Deep_Convolutional_Network", "pred" : "is_a", "obj" : "https://w3id.org/aio/DNN" }, { "sub" : "https://w3id.org/aio/Deep_FeedFoward", "pred" : "is_a", "obj" : "https://w3id.org/aio/DNN" }, { "sub" : "https://w3id.org/aio/Deep_Neural_Network", "pred" : "is_a", "obj" : "https://w3id.org/aio/ANN" }, { "sub" : "https://w3id.org/aio/Deep_Transfer_Learning", "pred" : "is_a", "obj" : "https://w3id.org/aio/DNN" }, { "sub" : "https://w3id.org/aio/Denoising_Auto_Encoder", "pred" : "is_a", "obj" : "https://w3id.org/aio/AE" }, { "sub" : "https://w3id.org/aio/DenseFeatures_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Layer" }, { "sub" : "https://w3id.org/aio/Dense_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Layer" }, { "sub" : "https://w3id.org/aio/Deployment_Bias", "pred" : "is_a", "obj" : "https://w3id.org/aio/Group_Bias" }, { "sub" : "https://w3id.org/aio/DepthwiseConv1D_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Convolutional_Layer" }, { "sub" : "https://w3id.org/aio/DepthwiseConv2D_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Convolutional_Layer" }, { "sub" : "https://w3id.org/aio/Detection_Bias", "pred" : "is_a", "obj" : "https://w3id.org/aio/Selection_And_Sampling_Bias" }, { "sub" : "https://w3id.org/aio/Dimensionality_Reduction", "pred" : "is_a", "obj" : "https://w3id.org/aio/Unsupervised_Learning" }, { "sub" : "https://w3id.org/aio/Discretization_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Numerical_Features_Preprocessing_Layer" }, { "sub" : "https://w3id.org/aio/Dot_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Layer" }, { "sub" : "https://w3id.org/aio/Dropout_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Regularization_Layer" }, { "sub" : "https://w3id.org/aio/Dunning-Kruger_Effect_Bias", "pred" : "is_a", "obj" : "https://w3id.org/aio/Cognitive_Bias" }, { "sub" : "https://w3id.org/aio/ELU_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Activation_Layer" }, { "sub" : "https://w3id.org/aio/Echo_State_Network", "pred" : "is_a", "obj" : "https://w3id.org/aio/RecNN" }, { "sub" : "https://w3id.org/aio/Ecological_Fallacy_Bias", "pred" : "is_a", "obj" : "https://w3id.org/aio/Selection_And_Sampling_Bias" }, { "sub" : "https://w3id.org/aio/Embedding_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Layer" }, { "sub" : "https://w3id.org/aio/Emergent_Bias", "pred" : "is_a", "obj" : "https://w3id.org/aio/Use_And_Interpretation_Bias" }, { "sub" : "https://w3id.org/aio/Encoder-Decoder_LLM", "pred" : "is_a", "obj" : "https://w3id.org/aio/LLM" }, { "sub" : "https://w3id.org/aio/Encoder_LLM", "pred" : "is_a", "obj" : "https://w3id.org/aio/LLM" }, { "sub" : "https://w3id.org/aio/Ensemble_Learning", "pred" : "is_a", "obj" : "https://w3id.org/aio/Machine_Learning" }, { "sub" : "https://w3id.org/aio/Error_Propagation_Bias", "pred" : "is_a", "obj" : "https://w3id.org/aio/Processing_Bias" }, { "sub" : "https://w3id.org/aio/Evaluation_Bias", "pred" : "is_a", "obj" : "https://w3id.org/aio/Selection_And_Sampling_Bias" }, { "sub" : "https://w3id.org/aio/Exclusion_Bias", "pred" : "is_a", "obj" : "https://w3id.org/aio/Selection_And_Sampling_Bias" }, { "sub" : "https://w3id.org/aio/Exponential_Function", "pred" : "is_a", "obj" : "https://w3id.org/aio/Function" }, { "sub" : "https://w3id.org/aio/Extreme_Learning_Machine", "pred" : 
"is_a", "obj" : "https://w3id.org/aio/FBN" }, { "sub" : "https://w3id.org/aio/Federated_Learning", "pred" : "is_a", "obj" : "https://w3id.org/aio/DNN" }, { "sub" : "https://w3id.org/aio/Feedback_Loop_Bias", "pred" : "is_a", "obj" : "https://w3id.org/aio/Use_And_Interpretation_Bias" }, { "sub" : "https://w3id.org/aio/Feedback_Network", "pred" : "is_a", "obj" : "https://w3id.org/aio/ANN" }, { "sub" : "https://w3id.org/aio/Fixed_Effects_Model", "pred" : "is_a", "obj" : "https://w3id.org/aio/Regression_Analysis" }, { "sub" : "https://w3id.org/aio/Flatten_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Reshaping_Layer" }, { "sub" : "https://w3id.org/aio/FractionalMaxPool2D_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Pooling_Layer" }, { "sub" : "https://w3id.org/aio/FractionalMaxPool3D_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Pooling_Layer" }, { "sub" : "https://w3id.org/aio/Funding_Bias", "pred" : "is_a", "obj" : "https://w3id.org/aio/Group_Bias" }, { "sub" : "https://w3id.org/aio/GRUCell_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Layer" }, { "sub" : "https://w3id.org/aio/GRU_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Recurrent_Layer" }, { "sub" : "https://w3id.org/aio/Gated_Recurrent_Unit", "pred" : "is_a", "obj" : "https://w3id.org/aio/LSTM" }, { "sub" : "https://w3id.org/aio/GaussianDropout_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Regularization_Layer" }, { "sub" : "https://w3id.org/aio/GaussianNoise_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Regularization_Layer" }, { "sub" : "https://w3id.org/aio/GeLu_Function", "pred" : "is_a", "obj" : "https://w3id.org/aio/Function" }, { "sub" : "https://w3id.org/aio/Generalized_Few-shot_Learning", "pred" : "is_a", "obj" : "https://w3id.org/aio/DNN" }, { "sub" : "https://w3id.org/aio/Generalized_Linear_Model", "pred" : "is_a", "obj" : "https://w3id.org/aio/Regression_Analysis" }, { "sub" : "https://w3id.org/aio/Generative_Adversarial_Network", "pred" : "is_a", "obj" : "https://w3id.org/aio/UPN" }, { "sub" : "https://w3id.org/aio/GlobalAveragePooling1D_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Pooling_Layer" }, { "sub" : "https://w3id.org/aio/GlobalAveragePooling2D_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Pooling_Layer" }, { "sub" : "https://w3id.org/aio/GlobalAveragePooling3D_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Pooling_Layer" }, { "sub" : "https://w3id.org/aio/GlobalMaxPooling1D_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Pooling_Layer" }, { "sub" : "https://w3id.org/aio/GlobalMaxPooling2D_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Pooling_Layer" }, { "sub" : "https://w3id.org/aio/GlobalMaxPooling3D_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Pooling_Layer" }, { "sub" : "https://w3id.org/aio/Graph_Convolutional_Network", "pred" : "is_a", "obj" : "https://w3id.org/aio/DNN" }, { "sub" : "https://w3id.org/aio/Graph_Convolutional_Policy_Network", "pred" : "is_a", "obj" : "https://w3id.org/aio/GCN" }, { "sub" : "https://w3id.org/aio/GroupNorm_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Normalization_Layer" }, { "sub" : "https://w3id.org/aio/Group_Bias", "pred" : "is_a", "obj" : "https://w3id.org/aio/Human_Bias" }, { "sub" : "https://w3id.org/aio/Groupthink_Bias", "pred" : "is_a", "obj" : "https://w3id.org/aio/Group_Bias" }, { "sub" : "https://w3id.org/aio/Hard_Sigmoid_Function", "pred" : "is_a", "obj" : "https://w3id.org/aio/Function" }, { "sub" : "https://w3id.org/aio/Hashing_Layer", 
"pred" : "is_a", "obj" : "https://w3id.org/aio/Categorical_Features_Preprocessing_Layer" }, { "sub" : "https://w3id.org/aio/Hidden_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Layer" }, { "sub" : "https://w3id.org/aio/Hierarchical_Classification", "pred" : "is_a", "obj" : "https://w3id.org/aio/Classification" }, { "sub" : "https://w3id.org/aio/Hierarchical_Clustering", "pred" : "is_a", "obj" : "https://w3id.org/aio/Clustering" }, { "sub" : "https://w3id.org/aio/Historical_Bias", "pred" : "is_a", "obj" : "https://w3id.org/aio/Bias" }, { "sub" : "https://w3id.org/aio/Hopfield_Network", "pred" : "is_a", "obj" : "https://w3id.org/aio/SCN" }, { "sub" : "https://w3id.org/aio/Hostile_Attribution_Bias", "pred" : "is_a", "obj" : "https://w3id.org/aio/Use_And_Interpretation_Bias" }, { "sub" : "https://w3id.org/aio/Human_Bias", "pred" : "is_a", "obj" : "https://w3id.org/aio/Bias" }, { "sub" : "https://w3id.org/aio/Human_Reporting_Bias", "pred" : "is_a", "obj" : "https://w3id.org/aio/Individual_Bias" }, { "sub" : "https://w3id.org/aio/Image_Augmentation_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Layer" }, { "sub" : "https://w3id.org/aio/Image_Preprocessing_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Layer" }, { "sub" : "https://w3id.org/aio/Implicit_Bias", "pred" : "is_a", "obj" : "https://w3id.org/aio/Individual_Bias" }, { "sub" : "https://w3id.org/aio/Incremenetal_Few-shot_Learning", "pred" : "is_a", "obj" : "https://w3id.org/aio/DNN" }, { "sub" : "https://w3id.org/aio/Individual_Bias", "pred" : "is_a", "obj" : "https://w3id.org/aio/Bias" }, { "sub" : "https://w3id.org/aio/Inherited_Bias", "pred" : "is_a", "obj" : "https://w3id.org/aio/Processing_Bias" }, { "sub" : "https://w3id.org/aio/InputLayer_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Layer" }, { "sub" : "https://w3id.org/aio/InputSpec_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Layer" }, { "sub" : "https://w3id.org/aio/Input_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Layer" }, { "sub" : "https://w3id.org/aio/InstanceNorm1d_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Normalization_Layer" }, { "sub" : "https://w3id.org/aio/InstanceNorm2d", "pred" : "is_a", "obj" : "https://w3id.org/aio/Normalization_Layer" }, { "sub" : "https://w3id.org/aio/InstanceNorm3d_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Normalization_Layer" }, { "sub" : "https://w3id.org/aio/Institutional_Bias", "pred" : "is_a", "obj" : "https://w3id.org/aio/Bias" }, { "sub" : "https://w3id.org/aio/IntegerLookup_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Categorical_Features_Preprocessing_Layer" }, { "sub" : "https://w3id.org/aio/Interpretation_Bias", "pred" : "is_a", "obj" : "https://w3id.org/aio/Individual_Bias" }, { "sub" : "https://w3id.org/aio/K-nearest_Neighbor_Algorithm", "pred" : "is_a", "obj" : "https://w3id.org/aio/Machine_Learning" }, { "sub" : "https://w3id.org/aio/K-nearest_Neighbor_Classification_Algorithm", "pred" : "is_a", "obj" : "https://w3id.org/aio/Classification" }, { "sub" : "https://w3id.org/aio/K-nearest_Neighbor_Classification_Algorithm", "pred" : "is_a", "obj" : "https://w3id.org/aio/Clustering" }, { "sub" : "https://w3id.org/aio/K-nearest_Neighbor_Regression_Algorithm", "pred" : "is_a", "obj" : "https://w3id.org/aio/Regression_Analysis" }, { "sub" : "https://w3id.org/aio/Kohonen_Network", "pred" : "is_a", "obj" : "https://w3id.org/aio/Network" }, { "sub" : "https://w3id.org/aio/LPPool1D_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Pooling_Layer" 
}, { "sub" : "https://w3id.org/aio/LPPool2D_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Pooling_Layer" }, { "sub" : "https://w3id.org/aio/LSTMCell_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Layer" }, { "sub" : "https://w3id.org/aio/LSTM_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Recurrent_Layer" }, { "sub" : "https://w3id.org/aio/Lambda_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Layer" }, { "sub" : "https://w3id.org/aio/Lasso_Regression", "pred" : "is_a", "obj" : "https://w3id.org/aio/Regression_Analysis" }, { "sub" : "https://w3id.org/aio/LayerNorm_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Normalization_Layer" }, { "sub" : "https://w3id.org/aio/LayerNormalization_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Normalization_Layer" }, { "sub" : "https://w3id.org/aio/Layer_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Layer" }, { "sub" : "https://w3id.org/aio/LazyBatchNorm1D_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/BatchNormalization_Layer" }, { "sub" : "https://w3id.org/aio/LazyBatchNorm2D_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/BatchNormalization_Layer" }, { "sub" : "https://w3id.org/aio/LazyBatchNorm3D_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/BatchNormalization_Layer" }, { "sub" : "https://w3id.org/aio/LazyInstanceNorm1d_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Normalization_Layer" }, { "sub" : "https://w3id.org/aio/LazyInstanceNorm2d_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Normalization_Layer" }, { "sub" : "https://w3id.org/aio/LazyInstanceNorm3d_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Normalization_Layer" }, { "sub" : "https://w3id.org/aio/LeakyReLU_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Activation_Layer" }, { "sub" : "https://w3id.org/aio/Least-squares_Analysis", "pred" : "is_a", "obj" : "https://w3id.org/aio/Regression_Analysis" }, { "sub" : "https://w3id.org/aio/Linear_Function", "pred" : "is_a", "obj" : "https://w3id.org/aio/Function" }, { "sub" : "https://w3id.org/aio/Linear_Regression", "pred" : "is_a", "obj" : "https://w3id.org/aio/Regression_Analysis" }, { "sub" : "https://w3id.org/aio/Linking_Bias", "pred" : "is_a", "obj" : "https://w3id.org/aio/Use_And_Interpretation_Bias" }, { "sub" : "https://w3id.org/aio/Liquid_State_Machine_Network", "pred" : "is_a", "obj" : "https://w3id.org/aio/Network" }, { "sub" : "https://w3id.org/aio/LocalResponseNorm_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Normalization_Layer" }, { "sub" : "https://w3id.org/aio/Locally-connected_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Layer" }, { "sub" : "https://w3id.org/aio/LocallyConnected1D_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Locally-connected_Layer" }, { "sub" : "https://w3id.org/aio/LocallyConnected2D_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Locally-connected_Layer" }, { "sub" : "https://w3id.org/aio/Logistic_Regression", "pred" : "is_a", "obj" : "https://w3id.org/aio/Regression_Analysis" }, { "sub" : "https://w3id.org/aio/Long_Short_Term_Memory", "pred" : "is_a", "obj" : "https://w3id.org/aio/RecNN" }, { "sub" : "https://w3id.org/aio/Loss_Of_Situational_Awareness_Bias", "pred" : "is_a", "obj" : "https://w3id.org/aio/Individual_Bias" }, { "sub" : "https://w3id.org/aio/Machine_Learning", "pred" : "is_a", "obj" : "https://w3id.org/aio/Method" }, { "sub" : "https://w3id.org/aio/Manifold_Learning", "pred" : "is_a", "obj" : "https://w3id.org/aio/Dimensionality_Reduction" }, { 
"sub" : "https://w3id.org/aio/Markov_Chain", "pred" : "is_a", "obj" : "https://w3id.org/aio/Network" }, { "sub" : "https://w3id.org/aio/Masking_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Layer" }, { "sub" : "https://w3id.org/aio/MaxPooling1D_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Pooling_Layer" }, { "sub" : "https://w3id.org/aio/MaxPooling2D_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Pooling_Layer" }, { "sub" : "https://w3id.org/aio/MaxPooling3D_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Pooling_Layer" }, { "sub" : "https://w3id.org/aio/MaxUnpool1D_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Pooling_Layer" }, { "sub" : "https://w3id.org/aio/MaxUnpool2D_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Pooling_Layer" }, { "sub" : "https://w3id.org/aio/MaxUnpool3D_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Pooling_Layer" }, { "sub" : "https://w3id.org/aio/Maximum_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Merging_Layer" }, { "sub" : "https://w3id.org/aio/Measurement_Bias", "pred" : "is_a", "obj" : "https://w3id.org/aio/Selection_And_Sampling_Bias" }, { "sub" : "https://w3id.org/aio/Merging_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Layer" }, { "sub" : "https://w3id.org/aio/Meta-Learning", "pred" : "is_a", "obj" : "https://w3id.org/aio/Machine_Learning" }, { "sub" : "https://w3id.org/aio/Metric_Learning", "pred" : "is_a", "obj" : "https://w3id.org/aio/DNN" }, { "sub" : "https://w3id.org/aio/Minimum_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Merging_Layer" }, { "sub" : "https://w3id.org/aio/Mode_Confusion_Bias", "pred" : "is_a", "obj" : "https://w3id.org/aio/Individual_Bias" }, { "sub" : "https://w3id.org/aio/Model_Selection_Bias", "pred" : "is_a", "obj" : "https://w3id.org/aio/Processing_Bias" }, { "sub" : "https://w3id.org/aio/MultiHeadAttention_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Attention_Layer" }, { "sub" : "https://w3id.org/aio/Multiclass_Classification", "pred" : "is_a", "obj" : "https://w3id.org/aio/Classification" }, { "sub" : "https://w3id.org/aio/Multidimensional_Scaling", "pred" : "is_a", "obj" : "https://w3id.org/aio/Dimensionality_Reduction" }, { "sub" : "https://w3id.org/aio/Multimodal_Deep_Learning", "pred" : "is_a", "obj" : "https://w3id.org/aio/DNN" }, { "sub" : "https://w3id.org/aio/Multimodal_Learning", "pred" : "is_a", "obj" : "https://w3id.org/aio/Machine_Learning" }, { "sub" : "https://w3id.org/aio/Multiply_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Merging_Layer" }, { "sub" : "https://w3id.org/aio/Natural_Language_Processing", "pred" : "is_a", "obj" : "https://w3id.org/aio/Machine_Learning" }, { "sub" : "https://w3id.org/aio/Neural_Turing_Machine_Network", "pred" : "is_a", "obj" : "https://w3id.org/aio/DFF" }, { "sub" : "https://w3id.org/aio/Neural_Turing_Machine_Network", "pred" : "is_a", "obj" : "https://w3id.org/aio/LSTM" }, { "sub" : "https://w3id.org/aio/Noise_Dense_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Layer" }, { "sub" : "https://w3id.org/aio/Normalization_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Numerical_Features_Preprocessing_Layer" }, { "sub" : "https://w3id.org/aio/Numerical_Features_Preprocessing_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Layer" }, { "sub" : "https://w3id.org/aio/One-shot_Learning", "pred" : "is_a", "obj" : "https://w3id.org/aio/DNN" }, { "sub" : "https://w3id.org/aio/Output_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Layer" }, { "sub" : 
"https://w3id.org/aio/PReLU_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Activation_Layer" }, { "sub" : "https://w3id.org/aio/Perceptron", "pred" : "is_a", "obj" : "https://w3id.org/aio/ANN" }, { "sub" : "https://w3id.org/aio/Permute_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Reshaping_Layer" }, { "sub" : "https://w3id.org/aio/Pooling_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Layer" }, { "sub" : "https://w3id.org/aio/Popularity_Bias", "pred" : "is_a", "obj" : "https://w3id.org/aio/Selection_And_Sampling_Bias" }, { "sub" : "https://w3id.org/aio/Population_Bias", "pred" : "is_a", "obj" : "https://w3id.org/aio/Selection_And_Sampling_Bias" }, { "sub" : "https://w3id.org/aio/Preprocessing_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Layer" }, { "sub" : "https://w3id.org/aio/Presentation_Bias", "pred" : "is_a", "obj" : "https://w3id.org/aio/Individual_Bias" }, { "sub" : "https://w3id.org/aio/Principal_Component_Analysis", "pred" : "is_a", "obj" : "https://w3id.org/aio/Dimensionality_Reduction" }, { "sub" : "https://w3id.org/aio/Probabilistic_Graphical_Model", "pred" : "is_a", "obj" : "https://w3id.org/aio/Machine_Learning" }, { "sub" : "https://w3id.org/aio/Probabilistic_Topic_Model", "pred" : "is_a", "obj" : "https://w3id.org/aio/Probabilistic_Graphical_Model" }, { "sub" : "https://w3id.org/aio/Processing_Bias", "pred" : "is_a", "obj" : "https://w3id.org/aio/Computational_Bias" }, { "sub" : "https://w3id.org/aio/Proportional_Hazards_Model", "pred" : "is_a", "obj" : "https://w3id.org/aio/Regression_Analysis" }, { "sub" : "https://w3id.org/aio/Proportional_Hazards_Model", "pred" : "is_a", "obj" : "https://w3id.org/aio/Survival_Analysis" }, { "sub" : "https://w3id.org/aio/RNN_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Recurrent_Layer" }, { "sub" : "https://w3id.org/aio/Radial_Basis_Network", "pred" : "is_a", "obj" : "https://w3id.org/aio/DFF" }, { "sub" : "https://w3id.org/aio/RandomBrightness_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Layer" }, { "sub" : "https://w3id.org/aio/RandomContrast_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Layer" }, { "sub" : "https://w3id.org/aio/RandomCrop_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Layer" }, { "sub" : "https://w3id.org/aio/RandomFlip_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Layer" }, { "sub" : "https://w3id.org/aio/RandomHeight_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Layer" }, { "sub" : "https://w3id.org/aio/RandomRotation_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Layer" }, { "sub" : "https://w3id.org/aio/RandomTranslation_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Layer" }, { "sub" : "https://w3id.org/aio/RandomWidth_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Layer" }, { "sub" : "https://w3id.org/aio/RandomZoom_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Layer" }, { "sub" : "https://w3id.org/aio/Random_Effects_Model", "pred" : "is_a", "obj" : "https://w3id.org/aio/Regression_Analysis" }, { "sub" : "https://w3id.org/aio/Random_Forest", "pred" : "is_a", "obj" : "https://w3id.org/aio/Ensemble_Learning" }, { "sub" : "https://w3id.org/aio/Ranking_Bias", "pred" : "is_a", "obj" : "https://w3id.org/aio/Anchoring_Bias" }, { "sub" : "https://w3id.org/aio/Rashomon_Effect_Bias", "pred" : "is_a", "obj" : "https://w3id.org/aio/Individual_Bias" }, { "sub" : "https://w3id.org/aio/ReLU_Function", "pred" : "is_a", "obj" : "https://w3id.org/aio/Function" }, { "sub" : "https://w3id.org/aio/ReLU_Layer", 
"pred" : "is_a", "obj" : "https://w3id.org/aio/Activation_Layer" }, { "sub" : "https://w3id.org/aio/Recurrent_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Layer" }, { "sub" : "https://w3id.org/aio/Recurrent_Neural_Network", "pred" : "is_a", "obj" : "https://w3id.org/aio/DNN" }, { "sub" : "https://w3id.org/aio/Recursive_Neural_Network", "pred" : "is_a", "obj" : "https://w3id.org/aio/DNN" }, { "sub" : "https://w3id.org/aio/Regression_Analysis", "pred" : "is_a", "obj" : "https://w3id.org/aio/Supervised_Learning" }, { "sub" : "https://w3id.org/aio/Regularization_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Layer" }, { "sub" : "https://w3id.org/aio/Reinforcement_Learning", "pred" : "is_a", "obj" : "https://w3id.org/aio/Machine_Learning" }, { "sub" : "https://w3id.org/aio/RepeatVector_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Reshaping_Layer" }, { "sub" : "https://w3id.org/aio/Representation_Bias", "pred" : "is_a", "obj" : "https://w3id.org/aio/Selection_And_Sampling_Bias" }, { "sub" : "https://w3id.org/aio/Representation_Learning", "pred" : "is_a", "obj" : "https://w3id.org/aio/DNN" }, { "sub" : "https://w3id.org/aio/Rescaling_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Image_Preprocessing_Layer" }, { "sub" : "https://w3id.org/aio/Reshape_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Reshaping_Layer" }, { "sub" : "https://w3id.org/aio/Reshaping_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Layer" }, { "sub" : "https://w3id.org/aio/Residual_Neural_Network", "pred" : "is_a", "obj" : "https://w3id.org/aio/DNN" }, { "sub" : "https://w3id.org/aio/Resizing_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Image_Preprocessing_Layer" }, { "sub" : "https://w3id.org/aio/Restricted_Boltzmann_Machine", "pred" : "is_a", "obj" : "https://w3id.org/aio/BM" }, { "sub" : "https://w3id.org/aio/Ridge_Regression", "pred" : "is_a", "obj" : "https://w3id.org/aio/Regression_Analysis" }, { "sub" : "https://w3id.org/aio/SeLu_Function", "pred" : "is_a", "obj" : "https://w3id.org/aio/Function" }, { "sub" : "https://w3id.org/aio/Selection_And_Sampling_Bias", "pred" : "is_a", "obj" : "https://w3id.org/aio/Computational_Bias" }, { "sub" : "https://w3id.org/aio/Selective_Adherence_Bias", "pred" : "is_a", "obj" : "https://w3id.org/aio/Individual_Bias" }, { "sub" : "https://w3id.org/aio/Self-supervised_Learning", "pred" : "is_a", "obj" : "https://w3id.org/aio/Machine_Learning" }, { "sub" : "https://w3id.org/aio/SeparableConvolution1D_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Convolutional_Layer" }, { "sub" : "https://w3id.org/aio/SeparableConvolution2D_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Convolutional_Layer" }, { "sub" : "https://w3id.org/aio/Sigmoid_Function", "pred" : "is_a", "obj" : "https://w3id.org/aio/Function" }, { "sub" : "https://w3id.org/aio/SimpleRNNCell_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Layer" }, { "sub" : "https://w3id.org/aio/SimpleRNN_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Recurrent_Layer" }, { "sub" : "https://w3id.org/aio/Societal_Bias", "pred" : "is_a", "obj" : "https://w3id.org/aio/Bias" }, { "sub" : "https://w3id.org/aio/Softmax_Function", "pred" : "is_a", "obj" : "https://w3id.org/aio/Function" }, { "sub" : "https://w3id.org/aio/Softmax_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Activation_Layer" }, { "sub" : "https://w3id.org/aio/Softplus_Function", "pred" : "is_a", "obj" : "https://w3id.org/aio/Function" }, { "sub" : "https://w3id.org/aio/Softsign_Function", 
"pred" : "is_a", "obj" : "https://w3id.org/aio/Function" }, { "sub" : "https://w3id.org/aio/Sparse_AE", "pred" : "is_a", "obj" : "https://w3id.org/aio/AE" }, { "sub" : "https://w3id.org/aio/Sparse_Learning", "pred" : "is_a", "obj" : "https://w3id.org/aio/Representation_Learning" }, { "sub" : "https://w3id.org/aio/SpatialDropout1D_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Regularization_Layer" }, { "sub" : "https://w3id.org/aio/SpatialDropout2D_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Regularization_Layer" }, { "sub" : "https://w3id.org/aio/SpatialDropout3D_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Regularization_Layer" }, { "sub" : "https://w3id.org/aio/Spatial_Regression", "pred" : "is_a", "obj" : "https://w3id.org/aio/Regression_Analysis" }, { "sub" : "https://w3id.org/aio/StackedRNNCells_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Layer" }, { "sub" : "https://w3id.org/aio/Streetlight_Effect_Bias", "pred" : "is_a", "obj" : "https://w3id.org/aio/Individual_Bias" }, { "sub" : "https://w3id.org/aio/StringLookup_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Categorical_Features_Preprocessing_Layer" }, { "sub" : "https://w3id.org/aio/Subtract_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Merging_Layer" }, { "sub" : "https://w3id.org/aio/Sunk_Cost_Fallacy_Bias", "pred" : "is_a", "obj" : "https://w3id.org/aio/Group_Bias" }, { "sub" : "https://w3id.org/aio/Supervised_Biclustering", "pred" : "is_a", "obj" : "https://w3id.org/aio/Biclustering" }, { "sub" : "https://w3id.org/aio/Supervised_Clustering", "pred" : "is_a", "obj" : "https://w3id.org/aio/Clustering" }, { "sub" : "https://w3id.org/aio/Supervised_Learning", "pred" : "is_a", "obj" : "https://w3id.org/aio/Machine_Learning" }, { "sub" : "https://w3id.org/aio/Support_Vector_Machine", "pred" : "is_a", "obj" : "https://w3id.org/aio/Network" }, { "sub" : "https://w3id.org/aio/Survival_Analysis", "pred" : "is_a", "obj" : "https://w3id.org/aio/Machine_Learning" }, { "sub" : "https://w3id.org/aio/Survivorship_Bias", "pred" : "is_a", "obj" : "https://w3id.org/aio/Processing_Bias" }, { "sub" : "https://w3id.org/aio/Swish_Function", "pred" : "is_a", "obj" : "https://w3id.org/aio/Function" }, { "sub" : "https://w3id.org/aio/Symmetrically_Connected_Network", "pred" : "is_a", "obj" : "https://w3id.org/aio/Network" }, { "sub" : "https://w3id.org/aio/SyncBatchNorm_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/BatchNormalization_Layer" }, { "sub" : "https://w3id.org/aio/Systemic_Bias", "pred" : "is_a", "obj" : "https://w3id.org/aio/Bias" }, { "sub" : "https://w3id.org/aio/Tanh_Function", "pred" : "is_a", "obj" : "https://w3id.org/aio/Function" }, { "sub" : "https://w3id.org/aio/Temporal_Bias", "pred" : "is_a", "obj" : "https://w3id.org/aio/Selection_And_Sampling_Bias" }, { "sub" : "https://w3id.org/aio/TextVectorization_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Text_Preprocessing_Layer" }, { "sub" : "https://w3id.org/aio/Text_Preprocessing_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Layer" }, { "sub" : "https://w3id.org/aio/ThresholdedReLU_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Activation_Layer" }, { "sub" : "https://w3id.org/aio/TimeDistributed_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Recurrent_Layer" }, { "sub" : "https://w3id.org/aio/Time_Series_Analysis", "pred" : "is_a", "obj" : "https://w3id.org/aio/Machine_Learning" }, { "sub" : "https://w3id.org/aio/Time_Series_Forecasting", "pred" : "is_a", "obj" : 
"https://w3id.org/aio/Machine_Learning" }, { "sub" : "https://w3id.org/aio/Transfer_Learning", "pred" : "is_a", "obj" : "https://w3id.org/aio/Machine_Learning" }, { "sub" : "https://w3id.org/aio/Transformer_Network", "pred" : "is_a", "obj" : "https://w3id.org/aio/DNN" }, { "sub" : "https://w3id.org/aio/Uncertainty_Bias", "pred" : "is_a", "obj" : "https://w3id.org/aio/Selection_And_Sampling_Bias" }, { "sub" : "https://w3id.org/aio/UnitNormalization_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Recurrent_Layer" }, { "sub" : "https://w3id.org/aio/Unsupervised_Biclustering", "pred" : "is_a", "obj" : "https://w3id.org/aio/Biclustering" }, { "sub" : "https://w3id.org/aio/Unsupervised_Clustering", "pred" : "is_a", "obj" : "https://w3id.org/aio/Clustering" }, { "sub" : "https://w3id.org/aio/Unsupervised_Learning", "pred" : "is_a", "obj" : "https://w3id.org/aio/Machine_Learning" }, { "sub" : "https://w3id.org/aio/Unsupervised_Pretrained_Network", "pred" : "is_a", "obj" : "https://w3id.org/aio/Network" }, { "sub" : "https://w3id.org/aio/UpSampling1D_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Reshaping_Layer" }, { "sub" : "https://w3id.org/aio/UpSampling2D_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Layer" }, { "sub" : "https://w3id.org/aio/UpSampling3D_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Layer" }, { "sub" : "https://w3id.org/aio/Use_And_Interpretation_Bias", "pred" : "is_a", "obj" : "https://w3id.org/aio/Computational_Bias" }, { "sub" : "https://w3id.org/aio/User_Interaction_Bias", "pred" : "is_a", "obj" : "https://w3id.org/aio/Individual_Bias" }, { "sub" : "https://w3id.org/aio/Variational_Auto_Encoder", "pred" : "is_a", "obj" : "https://w3id.org/aio/AE" }, { "sub" : "https://w3id.org/aio/Wrapper_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Layer" }, { "sub" : "https://w3id.org/aio/Zero-shot_Learning", "pred" : "is_a", "obj" : "https://w3id.org/aio/DNN" }, { "sub" : "https://w3id.org/aio/ZeroPadding1D_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Reshaping_Layer" }, { "sub" : "https://w3id.org/aio/ZeroPadding2D_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Reshaping_Layer" }, { "sub" : "https://w3id.org/aio/ZeroPadding3D_Layer", "pred" : "is_a", "obj" : "https://w3id.org/aio/Reshaping_Layer" }, { "sub" : "https://w3id.org/aio/eLu_Function", "pred" : "is_a", "obj" : "https://w3id.org/aio/Function" }, { "sub" : "https://w3id.org/aio/node2vec-CBOW", "pred" : "is_a", "obj" : "https://w3id.org/aio/W2V_CBOW" }, { "sub" : "https://w3id.org/aio/node2vec-SkipGram", "pred" : "is_a", "obj" : "https://w3id.org/aio/W2V_SkipGram" }, { "sub" : "https://w3id.org/aio/t-Distributed_Stochastic_Neighbor_embedding", "pred" : "is_a", "obj" : "https://w3id.org/aio/Dimensionality_Reduction" }, { "sub" : "https://w3id.org/aio/word2vec-CBOW", "pred" : "is_a", "obj" : "https://w3id.org/aio/ANN" }, { "sub" : "https://w3id.org/aio/word2vec-SkipGram", "pred" : "is_a", "obj" : "https://w3id.org/aio/ANN" }, { "sub" : "https://w3id.org/aio/Simpon's_Paradox_Bias", "pred" : "is_a", "obj" : "https://w3id.org/aio/Selection_And_Sampling_Bias" } ] } ] }