def _update_output(self, input, weight, bias):
    self.use_cudnn = cudnn.is_acceptable(input)
    if self.use_cudnn and cudnn.version() < 6000:
        # cuDNN releases before 6.0 cannot handle dilated convolutions here.
        self.use_cudnn = not self.is_dilated()
    if self.use_cudnn:
        output = input.new(*self._output_size(input, weight))
        if self.transposed:
            self._cudnn_info = (
                torch._C._cudnn_convolution_transpose_full_forward(
                    input, weight, bias, output, self.padding, self.stride, self.dilation,
                    self.groups, cudnn.benchmark))
        else:
            self._cudnn_info = torch._C._cudnn_convolution_full_forward(
                input, weight, bias, output, self.padding, self.stride, self.dilation,
                self.groups, cudnn.benchmark)
        if not self.requires_grad:
            del self._cudnn_info
        return output

    # Fall back to the THNN implementation when cuDNN cannot be used.
    self._bufs = [[] for g in range(self.groups)]
    output = self._thnn('update_output', input, weight, bias)
    if not self.requires_grad:
        del self._bufs
    return output
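cudnn.version() encodes the cuDNN release as major*1000 + minor*100 + patch, so the version() < 6000 test above reads "older than cuDNN 6.0.0", and the snippet falls back to THNN for dilated convolutions on those older libraries. A minimal sketch of the same gate as a standalone helper (the function name is invented for illustration):

import torch.backends.cudnn as cudnn

def cudnn_supports_dilated_conv(tensor):
    # Mirror the gate in _update_output: route a dilated convolution through
    # cuDNN only if the input is acceptable and the library is at least 6.0.0.
    # is_acceptable() is False when cuDNN is missing, so version() is only
    # consulted when it can return a real number.
    return cudnn.is_acceptable(tensor) and cudnn.version() >= 6000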
def backward_weight(fn, input, hx, output, weight, grad_weight):
    with torch.cuda.device_of(input):
        is_input_packed = fn.batch_sizes is not None
        handle = cudnn.get_handle()

        if fn.mode == cudnn.CUDNN_LSTM:
            hx, cx = hx
        else:
            cx = None

        if fn.batch_first and not is_input_packed:
            input = input.transpose(0, 1)
            output = output.transpose(0, 1)

        input_size = _input_size(fn, input)
        hidden_size = _hidden_size(fn)

        if not fn.requires_grad:
            raise RuntimeError('backward_weight can only be called when the function requires grad!')
        if fn.dropout != 0 and cudnn.version() < 5103:
            raise RuntimeError('dropout supported only in cudnn v 5.1 and above')
        if tuple(input.size()) != input_size:
            raise RuntimeError('Expected input size {}, got {}'.format(
                input_size, tuple(input.size())))
        if tuple(hx.size()) != hidden_size:
            raise RuntimeError('Expected hidden size {}, got {}'.format(
                hidden_size, hx.size()))

        assert hx.is_contiguous()
        assert cx is None or cx.is_contiguous()

        x = input.contiguous()
        y = output
        dw = fn.weight_buf.new().resize_as_(fn.weight_buf).zero_()

        # Accumulate the weight gradients into the flat buffer dw, reusing the
        # workspace and reserve buffers saved on fn during the forward pass.
        check_error(cudnn.lib.cudnnRNNBackwardWeights(
            handle,
            fn.rnn_desc,
            fn.seq_length,
            fn.x_descs, ctypes.c_void_p(x.data_ptr()),
            fn.hx_desc, ctypes.c_void_p(hx.data_ptr()),
            fn.y_descs, ctypes.c_void_p(y.data_ptr()),
            ctypes.c_void_p(fn.workspace.data_ptr()), fn.workspace.size(0),
            fn.w_desc, ctypes.c_void_p(dw.data_ptr()),
            ctypes.c_void_p(fn.reserve.data_ptr()), fn.reserve.size(0)
        ))

        # copy the gradients from the flat buffer dw into grad_weight
        grad_params = get_parameters(fn, handle, dw)
        _copyParams(grad_params, grad_weight)
        return grad_weight
def backward_weight(fn, input, hx, output, weight, grad_weight):
    with torch.cuda.device_of(input):
        is_input_packed = fn.batch_sizes is not None
        handle = cudnn.get_handle()

        if fn.mode == cudnn.CUDNN_LSTM:
            hx, cx = hx
        else:
            cx = None

        if fn.batch_first and not is_input_packed:
            input = input.transpose(0, 1)
            output = output.transpose(0, 1)

        input_size = _input_size(fn, input)
        hidden_size = _hidden_size(fn)

        if not fn.requires_grad:
            raise RuntimeError('backward_weight can only be called when the function requires grad!')
        if fn.dropout != 0 and cudnn.version() < 5103:
            raise RuntimeError('dropout supported only in cudnn v 5.1 and above')
        if tuple(input.size()) != input_size:
            raise RuntimeError('Expected input size {}, got {}'.format(
                input_size, tuple(input.size())))
        if tuple(hx.size()) != hidden_size:
            raise RuntimeError('Expected hidden size {}, got {}'.format(
                hidden_size, hx.size()))

        assert hx.is_contiguous()
        assert cx is None or cx.is_contiguous()

        x = input.contiguous()
        y = output
        dw = fn.weight_buf.new().resize_as_(fn.weight_buf).zero_()

        # Unlike the variant above, the workspace is allocated on demand from
        # fn.workspace_size instead of being kept alive on fn.workspace.
        with torch.cuda.device_of(input):
            workspace = torch.cuda.ByteTensor(fn.workspace_size)
        check_error(cudnn.lib.cudnnRNNBackwardWeights(
            handle,
            fn.rnn_desc,
            fn.seq_length,
            fn.x_descs, ctypes.c_void_p(x.data_ptr()),
            fn.hx_desc, ctypes.c_void_p(hx.data_ptr()),
            fn.y_descs, ctypes.c_void_p(y.data_ptr()),
            ctypes.c_void_p(workspace.data_ptr()), workspace.size(0),
            fn.w_desc, ctypes.c_void_p(dw.data_ptr()),
            ctypes.c_void_p(fn.reserve.data_ptr()), fn.reserve.size(0)
        ))

        # copy the gradients from the flat buffer dw into grad_weight
        grad_params = get_parameters(fn, handle, dw)
        _copyParams(grad_params, grad_weight)
        return grad_weight
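The last listing differs from the earlier backward_weight only in where the scratch space comes from: rather than reusing a persistent fn.workspace buffer, it allocates a fresh byte tensor of fn.workspace_size on the input's device for every call, which avoids holding the workspace between backward passes at the cost of an extra allocation. A minimal sketch of that device-scoped allocation pattern (helper name invented; assumes the required size is already known in bytes):

import torch

def alloc_rnn_workspace(reference, nbytes):
    # Allocate scratch space on the same CUDA device as reference, as the
    # on-demand variant above does before calling cudnnRNNBackwardWeights.
    with torch.cuda.device_of(reference):
        return torch.cuda.ByteTensor(nbytes)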