Merge pull request #301 from PraveerSINGH/SpatialFullConvolution-noBias
Add noBias for nn.SpatialFullConvolution
soumith authored Jun 23, 2016
2 parents 67c87ef + b80facc commit 0e7f438
Showing 2 changed files with 195 additions and 130 deletions.
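
The kernel changes below let the CUDA path run a SpatialFullConvolution whose bias has been removed with :noBias(). A minimal usage sketch in Lua of what this enables; it is not part of the diff, and the layer sizes are arbitrary example values:

require 'cunn'

local conv = nn.SpatialFullConvolution(16, 8, 3, 3, 2, 2)
conv:noBias()                         -- removes bias and gradBias from the module
conv:cuda()

local input  = torch.CudaTensor(4, 16, 8, 8):normal()
local output = conv:forward(input)    -- with this commit, the kernels skip the bias GEMM
print(conv.bias)                      -- nil
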
46 changes: 26 additions & 20 deletions lib/THCUNN/SpatialFullConvolution.cu
@@ -21,6 +21,7 @@ void THNN_CudaSpatialFullConvolution_updateOutput(

THCUNN_assertSameGPU(state, 6, input, output, weight,
bias, columns, ones);

THArgCheck(input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch mode) tensor is expected");

int batch = 1;
@@ -100,16 +101,18 @@ void THNN_CudaSpatialFullConvolution_updateOutput(
long k_ = 1;

// Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices)
THCudaBlas_gemm(
state,
't', 'n',
n_, m_, k_,
1,
THCudaTensor_data(state, ones), k_,
THCudaTensor_data(state, bias), k_,
1,
THCudaTensor_data(state, output_n), n_
);
if (bias) {
THCudaBlas_gemm(
state,
't', 'n',
n_, m_, k_,
1,
THCudaTensor_data(state, ones), k_,
THCudaTensor_data(state, bias), k_,
1,
THCudaTensor_data(state, output_n), n_
);
}

}

@@ -236,6 +239,7 @@ void THNN_CudaSpatialFullConvolution_accGradParameters(

THCUNN_assertSameGPU(state, 6, input, gradOutput, gradWeight,
gradBias, columns, ones);

THArgCheck(input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch mode) tensor is expected");

int batch = 1;
@@ -307,16 +311,18 @@ void THNN_CudaSpatialFullConvolution_accGradParameters(
long k_ = outputHeight * outputWidth;

// Do GEMV (note: this is a bit confusing because gemv assumes column-major matrices)
THCudaBlas_gemv(
state,
't',
k_, m_,
scale,
THCudaTensor_data(state, gradOutput_n), k_,
THCudaTensor_data(state, ones), 1,
1,
THCudaTensor_data(state, gradBias), 1
);
if (gradBias) {
THCudaBlas_gemv(
state,
't',
k_, m_,
scale,
THCudaTensor_data(state, gradOutput_n), k_,
THCudaTensor_data(state, ones), 1,
1,
THCudaTensor_data(state, gradBias), 1
);
}
}

// Free
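
For readers of the hunks above: the two BLAS calls that are now wrapped in nil checks only add or accumulate the bias term. The plain-Torch sketch below restates what they compute; the sizes are hypothetical and this is an illustration, not code from the commit. The forward GEMM against a vector of ones broadcasts bias over every spatial position of output_n, and the accGradParameters GEMV reduces gradOutput_n over its spatial dimensions into gradBias. Skipping both when the module has no bias is the whole kernel-side change.

local nOutputPlane, outputHeight, outputWidth = 8, 5, 5   -- hypothetical sizes
local scale = 1.0

local output_n     = torch.zeros(nOutputPlane, outputHeight, outputWidth)
local gradOutput_n = torch.randn(nOutputPlane, outputHeight, outputWidth)
local bias         = torch.randn(nOutputPlane)            -- nil after :noBias()
local gradBias     = torch.zeros(nOutputPlane)            -- nil after :noBias()

-- updateOutput: gemm(ones, bias) broadcasts bias over the spatial map
if bias then
   output_n:add(bias:view(nOutputPlane, 1, 1):expandAs(output_n))
end

-- accGradParameters: gemv(gradOutput_n, ones) is a scaled per-channel spatial sum
if gradBias then
   gradBias:add(scale, gradOutput_n:view(nOutputPlane, -1):sum(2):squeeze())
end
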
279 changes: 169 additions & 110 deletions test.lua
@@ -1361,29 +1361,43 @@ function cunntest.SpatialFullConvolution_forward_single()
from, inj, ini, kj, ki, to, outj, outi, sj, si, padH, padW, adjH, adjW)
times[title] = tm

local input = torch.randn(from,inj,ini)
local sconv = nn.SpatialFullConvolution(from,to,ki,kj,si,sj,padW,padH,adjW,adjH)
local groundtruth = sconv:forward(input)
local a = torch.Timer()
for i = 1,nloop do
groundtruth = sconv:forward(input)
end
tm.cpu = a:time().real
local function jacTests(noBias)
noBias = noBias or false
local input = torch.randn(from,inj,ini)
local sconv = nn.SpatialFullConvolution(from,to,ki,kj,si,sj,padW,padH,adjW,adjH)
if noBias then
sconv:noBias()
end
local groundtruth = sconv:forward(input)
local a = torch.Timer()
for i = 1,nloop do
groundtruth = sconv:forward(input)
end
tm.cpu = a:time().real

input = input:cuda()
local gconv = nn.SpatialFullConvolution(from,to,ki,kj,si,sj,padW,padH,adjW,adjH):cuda()
gconv.weight = sconv.weight:cuda()
gconv.bias = sconv.bias:cuda()
local rescuda = gconv:forward(input)
a:reset()
for i = 1,nloop do
rescuda = gconv:forward(input)
input = input:cuda()
local gconv = nn.SpatialFullConvolution(from,to,ki,kj,si,sj,padW,padH,adjW,adjH):cuda()
if noBias then
gconv:noBias()
end
gconv.weight = sconv.weight:cuda()
if gconv.bias then
gconv.bias = sconv.bias:cuda()
end
local rescuda = gconv:forward(input)
a:reset()
for i = 1,nloop do
rescuda = gconv:forward(input)
end
cutorch.synchronize()
tm.gpu = a:time().real

local error = rescuda:float() - groundtruth
mytester:assertlt(error:abs():max(), precision_forward, 'error on state (forward) ')
end
cutorch.synchronize()
tm.gpu = a:time().real

local error = rescuda:float() - groundtruth
mytester:assertlt(error:abs():max(), precision_forward, 'error on state (forward) ')
jacTests(false)
jacTests(true)
end

function cunntest.SpatialFullConvolution_forward_batch()
@@ -1408,29 +1422,43 @@ function cunntest.SpatialFullConvolution_forward_batch()
bs, from, inj, ini, kj, ki, bs, to, outj, outi, sj, si, padH, padW, adjH, adjW)
times[title] = tm

local input = torch.randn(bs,from,inj,ini)
local sconv = nn.SpatialFullConvolution(from,to,ki,kj,si,sj,padW,padH,adjW,adjH)
local groundtruth = sconv:forward(input)
local a = torch.Timer()
for i = 1,nloop do
groundtruth = sconv:forward(input)
end
tm.cpu = a:time().real
local function jacTests(noBias)
noBias = noBias or false
local input = torch.randn(bs,from,inj,ini)
local sconv = nn.SpatialFullConvolution(from,to,ki,kj,si,sj,padW,padH,adjW,adjH)
if noBias then
sconv:noBias()
end
local groundtruth = sconv:forward(input)
local a = torch.Timer()
for i = 1,nloop do
groundtruth = sconv:forward(input)
end
tm.cpu = a:time().real

input = input:cuda()
local gconv = nn.SpatialFullConvolution(from,to,ki,kj,si,sj,padW,padH,adjW,adjH):cuda()
gconv.weight = sconv.weight:cuda()
gconv.bias = sconv.bias:cuda()
local rescuda = gconv:forward(input)
a:reset()
for i = 1,nloop do
rescuda = gconv:forward(input)
input = input:cuda()
local gconv = nn.SpatialFullConvolution(from,to,ki,kj,si,sj,padW,padH,adjW,adjH):cuda()
if noBias then
gconv:noBias()
end
gconv.weight = sconv.weight:cuda()
if gconv.bias then
gconv.bias = sconv.bias:cuda()
end
local rescuda = gconv:forward(input)
a:reset()
for i = 1,nloop do
rescuda = gconv:forward(input)
end
cutorch.synchronize()
tm.gpu = a:time().real

local error = rescuda:float() - groundtruth
mytester:assertlt(error:abs():max(), precision_forward, 'error on state (forward) ')
end
cutorch.synchronize()
tm.gpu = a:time().real

local error = rescuda:float() - groundtruth
mytester:assertlt(error:abs():max(), precision_forward, 'error on state (forward) ')
jacTests(false)
jacTests(true)
end

function cunntest.SpatialFullConvolution_backward_single()
@@ -1454,46 +1482,62 @@ function cunntest.SpatialFullConvolution_backward_single()
from, inj, ini, kj, ki, to, outj, outi, sj, si, padH, padW, adjH, adjW)
times[title] = tm

local input = torch.randn(from,inj,ini)
local sconv = nn.SpatialFullConvolution(from,to,ki,kj,si,sj,padW,padH,adjW,adjH)
local output = sconv:forward(input)
local gradOutput = output:clone():normal()
sconv:zeroGradParameters()
local groundgrad = sconv:backward(input, gradOutput)
local a = torch.Timer()
for i = 1,nloop do
local function jacTests(noBias)
noBias = noBias or false
local input = torch.randn(from,inj,ini)
local sconv = nn.SpatialFullConvolution(from,to,ki,kj,si,sj,padW,padH,adjW,adjH)
if noBias then
sconv:noBias()
end
local output = sconv:forward(input)
local gradOutput = output:clone():normal()
sconv:zeroGradParameters()
groundgrad = sconv:backward(input, gradOutput)
end
local groundweight = sconv.gradWeight
local groundbias = sconv.gradBias
tm.cpu = a:time().real
local groundgrad = sconv:backward(input, gradOutput)
local a = torch.Timer()
for i = 1,nloop do
sconv:zeroGradParameters()
groundgrad = sconv:backward(input, gradOutput)
end
local groundweight = sconv.gradWeight
local groundbias = sconv.gradBias
tm.cpu = a:time().real

input = input:cuda()
gradOutput = gradOutput:cuda()
local gconv = nn.SpatialFullConvolution(from,to,ki,kj,si,sj,padW,padH,adjW,adjH):cuda()
gconv.weight = sconv.weight:cuda()
gconv.bias = sconv.bias:cuda()
gconv:forward(input)
gconv:zeroGradParameters()
local rescuda = gconv:backward(input, gradOutput)
a:reset()
for i = 1,nloop do
input = input:cuda()
gradOutput = gradOutput:cuda()
local gconv = nn.SpatialFullConvolution(from,to,ki,kj,si,sj,padW,padH,adjW,adjH):cuda()
if noBias then
gconv:noBias()
end
gconv.weight = sconv.weight:cuda()
if gconv.bias then
gconv.bias = sconv.bias:cuda()
end
gconv:forward(input)
gconv:zeroGradParameters()
rescuda = gconv:backward(input, gradOutput)
end
local weightcuda = gconv.gradWeight
local biascuda = gconv.gradBias
cutorch.synchronize()
tm.gpu = a:time().real
local rescuda = gconv:backward(input, gradOutput)
a:reset()
for i = 1,nloop do
gconv:zeroGradParameters()
rescuda = gconv:backward(input, gradOutput)
end
local weightcuda = gconv.gradWeight
cutorch.synchronize()
tm.gpu = a:time().real

local error = rescuda:float() - groundgrad
local werror = weightcuda:float() - groundweight
local berror = biascuda:float() - groundbias
local error = rescuda:float() - groundgrad
local werror = weightcuda:float() - groundweight

mytester:assertlt(error:abs():max(), precision_backward, 'error on state (backward) ')
mytester:assertlt(werror:abs():max(), precision_backward, 'error on weight (backward) ')
mytester:assertlt(berror:abs():max(), precision_backward, 'error on bias (backward) ')
mytester:assertlt(error:abs():max(), precision_backward, 'error on state (backward) ')
mytester:assertlt(werror:abs():max(), precision_backward, 'error on weight (backward) ')

if gconv.bias then
local berror = gconv.gradBias:float() - groundbias
mytester:assertlt(berror:abs():max(), precision_backward, 'error on bias (backward) ')
end
end

jacTests(false)
jacTests(true)
end

function cunntest.SpatialFullConvolution_backward_batch()
@@ -1520,46 +1564,61 @@ function cunntest.SpatialFullConvolution_backward_batch()
bs, to, outj, outi, sj, si, padH, padW, adjH, adjW)
times[title] = tm

local input = torch.randn(bs,from,inj,ini)
local sconv = nn.SpatialFullConvolution(from,to,ki,kj,si,sj,padW,padH,adjW,adjH)
local output = sconv:forward(input)
local gradOutput = output:clone():normal()
sconv:zeroGradParameters()
local groundgrad = sconv:backward(input, gradOutput)
local a = torch.Timer()
for i = 1,nloop do
local function jacTests(noBias)
noBias = noBias or false
local input = torch.randn(bs,from,inj,ini)
local sconv = nn.SpatialFullConvolution(from,to,ki,kj,si,sj,padW,padH,adjW,adjH)
if noBias then
sconv:noBias()
end
local output = sconv:forward(input)
local gradOutput = output:clone():normal()
sconv:zeroGradParameters()
groundgrad = sconv:backward(input, gradOutput)
end
local groundweight = sconv.gradWeight
local groundbias = sconv.gradBias
tm.cpu = a:time().real
local groundgrad = sconv:backward(input, gradOutput)
local a = torch.Timer()
for i = 1,nloop do
sconv:zeroGradParameters()
groundgrad = sconv:backward(input, gradOutput)
end
local groundweight = sconv.gradWeight
local groundbias = sconv.gradBias
tm.cpu = a:time().real

input = input:cuda()
gradOutput = gradOutput:cuda()
local gconv = nn.SpatialFullConvolution(from,to,ki,kj,si,sj,padW,padH,adjW,adjH):cuda()
gconv.weight = sconv.weight:cuda()
gconv.bias = sconv.bias:cuda()
gconv:forward(input)
gconv:zeroGradParameters()
local rescuda = gconv:backward(input, gradOutput)
a:reset()
for i = 1,nloop do
input = input:cuda()
gradOutput = gradOutput:cuda()
local gconv = nn.SpatialFullConvolution(from,to,ki,kj,si,sj,padW,padH,adjW,adjH):cuda()
if noBias then
gconv:noBias()
end
gconv.weight = sconv.weight:cuda()
if gconv.bias then
gconv.bias = sconv.bias:cuda()
end
gconv:forward(input)
gconv:zeroGradParameters()
rescuda = gconv:backward(input, gradOutput)
end
local weightcuda = gconv.gradWeight
local biascuda = gconv.gradBias
cutorch.synchronize()
tm.gpu = a:time().real
local rescuda = gconv:backward(input, gradOutput)
a:reset()
for i = 1,nloop do
gconv:zeroGradParameters()
rescuda = gconv:backward(input, gradOutput)
end
local weightcuda = gconv.gradWeight
cutorch.synchronize()
tm.gpu = a:time().real

local error = rescuda:float() - groundgrad
local werror = weightcuda:float() - groundweight
local berror = biascuda:float() - groundbias
local error = rescuda:float() - groundgrad
local werror = weightcuda:float() - groundweight

mytester:assertlt(error:abs():max(), precision_backward, 'error on state (backward) ')
mytester:assertlt(werror:abs():max(), precision_backward, 'error on weight (backward) ')
mytester:assertlt(berror:abs():max(), precision_backward, 'error on bias (backward) ')
mytester:assertlt(error:abs():max(), precision_backward, 'error on state (backward) ')
mytester:assertlt(werror:abs():max(), precision_backward, 'error on weight (backward) ')
if gconv.bias then
local berror = gconv.gradBias:float() - groundbias
mytester:assertlt(berror:abs():max(), precision_backward, 'error on bias (backward) ')
end
end

jacTests(false)
jacTests(true)
end

function cunntest.SpatialDilatedConvolution_forward_single()
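
All four SpatialFullConvolution tests in test.lua are reworked the same way: the body moves into a local jacTests(noBias) closure that runs once with and once without a bias, and the CUDA module's bias is only copied or checked when it exists. A condensed, self-contained skeleton of that pattern follows; the sizes and the tolerance are hypothetical, and only the forward comparison is shown.

require 'cunn'

local from, to, ki, kj, si, sj = 4, 6, 3, 3, 2, 2         -- hypothetical sizes

local function jacTests(noBias)
   noBias = noBias or false
   local input = torch.randn(from, 8, 8)
   local sconv = nn.SpatialFullConvolution(from, to, ki, kj, si, sj)
   if noBias then sconv:noBias() end
   local groundtruth = sconv:forward(input)

   local gconv = nn.SpatialFullConvolution(from, to, ki, kj, si, sj):cuda()
   if noBias then gconv:noBias() end
   gconv.weight = sconv.weight:cuda()
   if gconv.bias then gconv.bias = sconv.bias:cuda() end   -- bias may now be nil
   local rescuda = gconv:forward(input:cuda())

   -- loose tolerance, chosen only for this sketch
   assert((rescuda:float() - groundtruth:float()):abs():max() < 1e-4)
end

jacTests(false)   -- with bias
jacTests(true)    -- without bias
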
