We are now ready to implement an RNN from scratch. In particular, we will train this RNN to function as a character-level language model (see Section 9.4) and train it on a corpus consisting of the entire text of H. G. Wells' The Time Machine, following the data processing steps outlined in Section 9.2. We start by loading the dataset.
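The code in this section assumes the usual framework imports; each framework variant below needs its own. For the PyTorch version, for instance, the following is sufficient:

import torch
from torch import nn
# The d2l package bundles helpers used throughout the book,
# e.g. d2l.Module and d2l.add_to_class.
from d2l import torch as d2l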
9.5.1. RNN Model
We begin by defining a class to implement the RNN model (Section 9.4.2). Note that the number of hidden units num_hiddens is a tunable hyperparameter. Implementations are given below in turn for PyTorch, MXNet, JAX (Flax), and TensorFlow.
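Recall from Section 9.4.2 the shapes of the parameters being created here, writing $d$ for num_inputs and $h$ for num_hiddens:

$$\mathbf{W}_{\textrm{xh}} \in \mathbb{R}^{d \times h}, \qquad \mathbf{W}_{\textrm{hh}} \in \mathbb{R}^{h \times h}, \qquad \mathbf{b}_{\textrm{h}} \in \mathbb{R}^{1 \times h}.$$

The weights are initialized from a Gaussian with standard deviation sigma and the bias is initialized to zero.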
class RNNScratch(d2l.Module):  #@save
    """The RNN model implemented from scratch."""
    def __init__(self, num_inputs, num_hiddens, sigma=0.01):
        super().__init__()
        self.save_hyperparameters()
        self.W_xh = nn.Parameter(
            torch.randn(num_inputs, num_hiddens) * sigma)
        self.W_hh = nn.Parameter(
            torch.randn(num_hiddens, num_hiddens) * sigma)
        self.b_h = nn.Parameter(torch.zeros(num_hiddens))
class RNNScratch(d2l.Module):  #@save
    """The RNN model implemented from scratch."""
    def __init__(self, num_inputs, num_hiddens, sigma=0.01):
        super().__init__()
        self.save_hyperparameters()
        self.W_xh = np.random.randn(num_inputs, num_hiddens) * sigma
        self.W_hh = np.random.randn(
            num_hiddens, num_hiddens) * sigma
        self.b_h = np.zeros(num_hiddens)
class RNNScratch(nn.Module):  #@save
    """The RNN model implemented from scratch."""
    num_inputs: int
    num_hiddens: int
    sigma: float = 0.01

    def setup(self):
        self.W_xh = self.param('W_xh', nn.initializers.normal(self.sigma),
                               (self.num_inputs, self.num_hiddens))
        self.W_hh = self.param('W_hh', nn.initializers.normal(self.sigma),
                               (self.num_hiddens, self.num_hiddens))
        self.b_h = self.param('b_h', nn.initializers.zeros,
                              (self.num_hiddens,))
class RNNScratch(d2l.Module):  #@save
    """The RNN model implemented from scratch."""
    def __init__(self, num_inputs, num_hiddens, sigma=0.01):
        super().__init__()
        self.save_hyperparameters()
        self.W_xh = tf.Variable(tf.random.normal(
            (num_inputs, num_hiddens)) * sigma)
        self.W_hh = tf.Variable(tf.random.normal(
            (num_hiddens, num_hiddens)) * sigma)
        self.b_h = tf.Variable(tf.zeros(num_hiddens))
The forward method below defines how to compute the output and hidden state at any time step, given the current input and the state of the model at the previous time step. Note that the RNN model loops over the outermost dimension of inputs, updating the hidden state one time step at a time. The state is passed around as a one-element tuple and unpacked with state, = state, which leaves room for later models (such as LSTMs) that carry more than one state tensor. The model here uses a tanh activation function (Section 5.1.2.3).
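Concretely, each iteration of the loop applies the hidden state update from Section 9.4.2, with tanh as the activation function:

$$\mathbf{H}_t = \tanh(\mathbf{X}_t \mathbf{W}_{\textrm{xh}} + \mathbf{H}_{t-1} \mathbf{W}_{\textrm{hh}} + \mathbf{b}_{\textrm{h}}),$$

where $\mathbf{X}_t$ is the minibatch of inputs at time step $t$, of shape (batch_size, num_inputs), and $\mathbf{H}_t$ is the corresponding hidden state, of shape (batch_size, num_hiddens).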
@d2l.add_to_class(RNNScratch)  #@save
def forward(self, inputs, state=None):
    if state is None:
        # Initial state with shape: (batch_size, num_hiddens)
        state = torch.zeros((inputs.shape[1], self.num_hiddens),
                            device=inputs.device)
    else:
        state, = state
    outputs = []
    for X in inputs:  # Shape of inputs: (num_steps, batch_size, num_inputs)
        state = torch.tanh(torch.matmul(X, self.W_xh) +
                           torch.matmul(state, self.W_hh) + self.b_h)
        outputs.append(state)
    return outputs, state
@d2l.add_to_class(RNNScratch)  #@save
def forward(self, inputs, state=None):
    if state is None:
        # Initial state with shape: (batch_size, num_hiddens)
        state = np.zeros((inputs.shape[1], self.num_hiddens),
                         ctx=inputs.ctx)
    else:
        state, = state
    outputs = []
    for X in inputs:  # Shape of inputs: (num_steps, batch_size, num_inputs)
        state = np.tanh(np.dot(X, self.W_xh) +
                        np.dot(state, self.W_hh) + self.b_h)
        outputs.append(state)
    return outputs, state
@d2l.add_to_class(RNNScratch)  #@save
def __call__(self, inputs, state=None):
    if state is not None:
        state, = state
    outputs = []
    for X in inputs:  # Shape of inputs: (num_steps, batch_size, num_inputs)
        state = jnp.tanh(jnp.matmul(X, self.W_xh) +
                         (jnp.matmul(state, self.W_hh) if state is not None
                          else 0) + self.b_h)
        outputs.append(state)
    return outputs, state
@d2l.add_to_class(RNNScratch)  #@save
def forward(self, inputs, state=None):
    if state is None:
        # Initial state with shape: (batch_size, num_hiddens)
        state = tf.zeros((inputs.shape[1], self.num_hiddens))
    else:
        state, = state
        state = tf.reshape(state, (-1, self.num_hiddens))
    outputs = []
    for X in inputs:  # Shape of inputs: (num_steps, batch_size, num_inputs)
        state = tf.tanh(tf.matmul(X, self.W_xh) +
                        tf.matmul(state, self.W_hh) + self.b_h)
        outputs.append(state)
    return outputs, state
We can feed a minibatch of input sequences into the RNN model as follows (the example below uses the JAX implementation).
batch_size, num_inputs, num_hiddens, num_steps = 2, 16, 32, 100
rnn = RNNScratch(num_inputs, num_hiddens)
X = jnp.ones((num_steps, batch_size, num_inputs))
# Flax modules create their parameters lazily: init_with_output both
# initializes the parameters and runs the forward pass, returning
# (outputs, state) along with the parameter collection (ignored here).
(outputs, state), _ = rnn.init_with_output(d2l.get_key(), X)
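As a quick sanity check (a minimal sketch using the variables defined above), the model should return one hidden state per time step, each of shape (batch_size, num_hiddens), with the final state matching the last entry of outputs:

# Verify the shapes returned by the forward pass.
assert len(outputs) == num_steps                      # one state per time step
assert outputs[0].shape == (batch_size, num_hiddens)  # per-step state shape
assert state.shape == (batch_size, num_hiddens)       # final state shape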