Source code for pahelix.networks.pre_post_process

#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Preprocess and postprocess layers
"""

from functools import partial

import paddle.fluid as fluid
import paddle.fluid.layers as layers

def pre_post_process_layer(prev_out,
                           out,
                           process_cmd,
                           dropout_rate=0.,
                           epsilon=1e-5,
                           name="",
                           is_test=False):
    """
    Optionally add residual connection, layer normalization and dropout to
    the out tensor according to the value of process_cmd: "a" adds the
    residual connection, "n" applies layer normalization, and "d" applies
    dropout. This is used before or after multi-head attention and
    position-wise feed-forward networks.
    """
    for cmd in process_cmd:
        if cmd == "a":  # add residual connection
            out = out + prev_out if prev_out is not None else out
        elif cmd == "n":  # add layer normalization
            out = layers.layer_norm(
                out,
                begin_norm_axis=len(out.shape) - 1,
                param_attr=fluid.ParamAttr(
                    name=name + "_layer_norm_scale",
                    initializer=fluid.initializer.Constant(1.)),
                bias_attr=fluid.ParamAttr(
                    name=name + "_layer_norm_bias",
                    initializer=fluid.initializer.Constant(0.)),
                epsilon=epsilon)
        elif cmd == "d":  # add dropout
            if dropout_rate:
                out = layers.dropout(
                    out,
                    dropout_prob=dropout_rate,
                    dropout_implementation="upscale_in_train",
                    is_test=is_test)
    return out
pre_process_layer = partial(pre_post_process_layer, None)
post_process_layer = pre_post_process_layer
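
A minimal usage sketch (an assumption, not part of this module): the tensor names, the stand-in fc sub-layer, and the "nd"/"da" command strings below are illustrative, mirroring the common transformer pattern of normalizing before a sub-layer and adding dropout plus the residual after it.

    # Input with shape [batch, 8, 16] (batch dim is implicit in layers.data).
    x = layers.data(name="x", shape=[8, 16], dtype="float32")
    # Pre-process ("nd"): layer norm, then dropout, on the sub-layer input.
    normed = pre_process_layer(x, "nd", dropout_rate=0.1, name="pre_att")
    # Stand-in for the real sub-layer (e.g. multi-head attention).
    sub_out = layers.fc(input=normed, size=16, num_flatten_dims=2)
    # Post-process ("da"): dropout on sub_out, then residual add with x.
    y = post_process_layer(x, sub_out, "da", dropout_rate=0.1, name="post_att")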