denizyuret/Knet.jl

MethodError: no method matching iterate(::Nothing) while using params()

johnberg1 opened this issue · 2 comments

Hello,

I implemented a Discriminator structure for my project and implemented a discriminator loss function to calculate the loss. I can calculate the loss function using the @diff macro. However, when I use the params() function on the calculated value, I get an error.

Discriminator.zip

You can find the notebook I used in the attachment, I also included my code on the following lines:

# Knet provides the layers/array ops, CUDA the GPU backend, Statistics `mean`.
using Knet
using CUDA
using Statistics
# Default array type for this session (KnetArray{Float32} in this GPU run,
# per the params listing printed below).
atype = Knet.atype()

# 3x3 convolution layer (Knet `conv4`, mode = 1), stride 1, padding 1, so the
# spatial size is preserved. `act` toggles an elementwise activation `f`
# (default `leakyrelu`) applied to the convolution output.
mutable struct Conv3x3; w; act; f; end

function (c::Conv3x3)(x)
    out = conv4(c.w, x, padding = 1, stride = 1, mode = 1)
    return c.act ? c.f.(out) : out
end

Conv3x3(inch::Int, outch::Int, act, f = leakyrelu) = Conv3x3(param(3, 3, inch, outch), act, f)

# 1x1 convolution layer (Knet `conv4`, mode = 1), stride 1, no padding —
# a per-pixel channel mixer, used here as the residual shortcut projection.
# `act` toggles an elementwise activation `f` (default `leakyrelu`).
mutable struct Conv1x1; w; act; f; end

function (c::Conv1x1)(x)
    out = conv4(c.w, x, padding = 0, stride = 1, mode = 1)
    return c.act ? c.f.(out) : out
end

Conv1x1(inch::Int, outch::Int, act, f = leakyrelu) = Conv1x1(param(1, 1, inch, outch), act, f)

# Fully connected layer without activation: w * mat(dropout(x, p)) .+ b.
mutable struct Dense; w; b; p; end

function (d::Dense)(x)
    # `mat` flattens a 4-D tensor into a 2-D matrix so matmul applies.
    dropped = dropout(x, d.p)
    return d.w * mat(dropped) .+ d.b
end

Dense(i::Int, o::Int; pdrop = 0) = Dense(param(o, i), param0(o), pdrop)

# Fully connected layer with elementwise activation `f` (default `relu`):
# f.(w * mat(dropout(x, p)) .+ b).
mutable struct Dense1; w; b; f; p; end

function (d::Dense1)(x)
    # `mat` flattens a 4-D tensor into a 2-D matrix so matmul applies.
    dropped = dropout(x, d.p)
    return d.f.(d.w * mat(dropped) .+ d.b)
end

Dense1(i::Int, o::Int, f = relu; pdrop = 0) = Dense1(param(o, i), param0(o), f, pdrop)

# Sequential container: stores a tuple of callables and applies them in order.
struct Chain
    layers
    Chain(layers...) = new(layers)
end

# Feed the output of each layer into the next; an empty chain is the identity.
(c::Chain)(x) = foldl((acc, layer) -> layer(acc), c.layers; init = x)

# Spatial downsampling by pooling with the given window size.
# mode = 1 selects average pooling in Knet's `pool` (see Knet docs).
struct Downsample; window; end

function (d::Downsample)(x)
    return pool(x, window = d.window, mode = 1)
end

# Leaky ReLU for scalars: identity for x ≥ 0, slope `alpha` for x < 0
# (assuming 0 ≤ alpha ≤ 1). Broadcast as `leakyrelu.(x)` for arrays.
leakyrelu(x; alpha = 0.01) = max(x, alpha .* x)

# Residual block with optional 2x downsampling.
# Main path: two 3x3 convs (first with activation); shortcut: one 1x1 conv.
struct ResDownBlock
    downsample
    downsampler
    cIn
    cOut
    convs
    convShortcut
    firstBlock
    function ResDownBlock(cIn::Int, cOut::Int, downsample::Bool = true, firstBlock::Bool = false)
        new(downsample,
            Downsample(2),                                          # halves H and W
            cIn, cOut,
            Chain(Conv3x3(cIn, cOut, true), Conv3x3(cOut, cOut, false)),
            Chain(Conv1x1(cIn, cOut, false)),
            firstBlock)
    end
end

# Forward pass of a residual down block.
# Main path: (leakyrelu unless first block) -> convs -> (pool if downsampling).
# Shortcut:  the first block pools before its 1x1 conv; later blocks apply the
#            1x1 conv first and pool afterwards.
function (rdb::ResDownBlock)(x)
    shortcut = copy(x)

    # The first block receives raw pixels, so no activation in front of it.
    h = rdb.firstBlock ? x : leakyrelu.(x)
    h = rdb.convs(h)

    if rdb.downsample
        h = rdb.downsampler(h)
        if rdb.firstBlock
            shortcut = rdb.convShortcut(rdb.downsampler(shortcut))
        else
            shortcut = rdb.downsampler(rdb.convShortcut(shortcut))
        end
    end

    return h + shortcut
end

# Projection-style discriminator: six residual blocks (the last one keeps the
# spatial resolution), a linear scoring head, and a condition-projection head.
struct Discriminator
    resdown1
    resdown2
    resdown3
    resdown4
    resdown5
    resdown6
    linear
    conditionProjector
    batchSize
    function Discriminator(hiddenDim::Int = 1024, discCondChannels::Int = 1024, batchSize::Int = 1)
        new(ResDownBlock(3, 64, true, true),        # first block: raw pixels in
            ResDownBlock(64, 128, true, false),
            ResDownBlock(128, 256, true, false),
            ResDownBlock(256, 512, true, false),
            ResDownBlock(512, 1024, true, false),
            ResDownBlock(1024, 1024, false, false), # no downsampling here
            Chain(Dense(1024, 1)),                  # scalar score head
            Chain(Dense1(hiddenDim, 1024), Dense(1024, discCondChannels)),
            batchSize)
    end
end

# Score image `x` against the previous image and the condition vector `y`.
# Returns a scalar: linear score + projection term sum(proj(y) .* features).
function (d::Discriminator)(x, y, prev_image)
    early = (d.resdown1, d.resdown2, d.resdown3)

    # Encode the previous image with the shared early blocks.
    for block in early
        prev_image = block(prev_image)
    end

    # Project the condition vector into feature space.
    y = d.conditionProjector(y)

    # Encode the current image, then take the feature difference.
    for block in early
        x = block(x)
    end
    x = x - prev_image

    # Deeper blocks operate on the difference features.
    for block in (d.resdown4, d.resdown5, d.resdown6)
        x = block(x)
    end

    x = leakyrelu.(x)
    # Global sum pooling over the two spatial dims, kept as two sequential
    # sums to preserve the exact floating-point accumulation order.
    x = sum(x, dims = 1)
    x = sum(x, dims = 2)

    out = d.linear(x)         # unconditional score (1-element result)
    x = reshape(x, 1024, 1)   # flatten features; assumes batch size 1 — see batchSize field
    c = sum(y .* x)           # projection term
    return out[1] + c
end

# Hinge discriminator loss:
#   mean(relu(1 - D(real))) + mean(relu(1 + D(fake)))
# Both images are scored against the same condition and previous image.
function discriminatorLoss(D::Discriminator, fake_image, real_image, y, prev_image)
    score_fake = D(fake_image, y, prev_image)
    score_real = D(real_image, y, prev_image)
    return mean(relu.(1 - score_real)) + mean(relu.(1 + score_fake))
end

# Build the discriminator and random test tensors (batch size 1, 128x128 RGB).
D = Discriminator();
real_image = atype(rand(128,128,3,1))
condition = atype(rand(1024,1))
prev_image = atype(rand(128,128,3,1))
fake_image = atype(rand(128,128,3,1));

# Record the loss computation on an AutoGrad tape so gradients can be taken.
dloss = @diff discriminatorLoss(D,fake_image,real_image,condition,prev_image)

# List the Params recorded on the tape (this is the call that errors below;
# fixed by updating Knet/AutoGrad — see the maintainer's reply).
params(dloss)

After the last line, I get the following error message:

MethodError: no method matching iterate(::Nothing)
Closest candidates are:
  iterate(!Matched::Base.EnvDict) at env.jl:119
  iterate(!Matched::Base.EnvDict, !Matched::Any) at env.jl:119
  iterate(!Matched::Base.AsyncGenerator, !Matched::Base.AsyncGeneratorState) at asyncmap.jl:382
  ...

Stacktrace:
 [1] isempty(::Nothing) at ./essentials.jl:737
 [2] show(::IOContext{Base.GenericIOBuffer{Array{UInt8,1}}}, ::AutoGrad.Node) at /kuacc/users/abaykal20/.julia/packages/AutoGrad/VFrAv/src/show.jl:44
 [3] show_delim_array(::IOContext{Base.GenericIOBuffer{Array{UInt8,1}}}, ::Array{AutoGrad.Node,1}, ::Char, ::String, ::String, ::Bool, ::Int64, ::Int64) at ./show.jl:744
 [4] show_vector(::IOContext{Base.GenericIOBuffer{Array{UInt8,1}}}, ::Array{AutoGrad.Node,1}, ::Char, ::Char) at ./arrayshow.jl:472
 [5] show_vector at ./arrayshow.jl:461 [inlined]
 [6] show(::IOContext{Base.GenericIOBuffer{Array{UInt8,1}}}, ::Array{AutoGrad.Node,1}) at ./arrayshow.jl:432
 [7] _show_default(::IOContext{Base.GenericIOBuffer{Array{UInt8,1}}}, ::Any) at ./show.jl:406
 [8] show_default at ./show.jl:389 [inlined]
 [9] show(::IOContext{Base.GenericIOBuffer{Array{UInt8,1}}}, ::Any) at ./show.jl:384
 [10] _show_default(::IOContext{Base.GenericIOBuffer{Array{UInt8,1}}}, ::Any) at ./show.jl:406
 [11] show_default at ./show.jl:389 [inlined]
 [12] show at ./show.jl:384 [inlined]
 [13] show at ./multimedia.jl:47 [inlined]
 [14] limitstringmime(::MIME{Symbol("text/plain")}, ::Base.Generator{Base.Iterators.Filter{AutoGrad.var"#199#201",Array{AutoGrad.Node,1}},AutoGrad.var"#198#200"}) at /kuacc/users/abaykal20/.julia/packages/IJulia/IDNmS/src/inline.jl:43
 [15] display_mimestring at /kuacc/users/abaykal20/.julia/packages/IJulia/IDNmS/src/display.jl:71 [inlined]
 [16] display_dict(::Base.Generator{Base.Iterators.Filter{AutoGrad.var"#199#201",Array{AutoGrad.Node,1}},AutoGrad.var"#198#200"}) at /kuacc/users/abaykal20/.julia/packages/IJulia/IDNmS/src/display.jl:102
 [17] #invokelatest#1 at ./essentials.jl:710 [inlined]
 [18] invokelatest at ./essentials.jl:709 [inlined]
 [19] execute_request(::ZMQ.Socket, ::IJulia.Msg) at /kuacc/users/abaykal20/.julia/packages/IJulia/IDNmS/src/execute_request.jl:112
 [20] #invokelatest#1 at ./essentials.jl:710 [inlined]
 [21] invokelatest at ./essentials.jl:709 [inlined]
 [22] eventloop(::ZMQ.Socket) at /kuacc/users/abaykal20/.julia/packages/IJulia/IDNmS/src/eventloop.jl:8
 [23] (::IJulia.var"#15#18")() at ./task.jl:356

When I call

params(D)

it works as intended, listing all the parameters I defined in my struct:

24-element Array{Param,1}:
 P(KnetArray{Float32,4}(3,3,3,64))
 P(KnetArray{Float32,4}(3,3,64,64))
 P(KnetArray{Float32,4}(1,1,3,64))
 P(KnetArray{Float32,4}(3,3,64,128))
 P(KnetArray{Float32,4}(3,3,128,128))
 P(KnetArray{Float32,4}(1,1,64,128))
 P(KnetArray{Float32,4}(3,3,128,256))
 P(KnetArray{Float32,4}(3,3,256,256))
 P(KnetArray{Float32,4}(1,1,128,256))
 P(KnetArray{Float32,4}(3,3,256,512))
 P(KnetArray{Float32,4}(3,3,512,512))
 P(KnetArray{Float32,4}(1,1,256,512))
 P(KnetArray{Float32,4}(3,3,512,1024))
 P(KnetArray{Float32,4}(3,3,1024,1024))
 P(KnetArray{Float32,4}(1,1,512,1024))
 P(KnetArray{Float32,4}(3,3,1024,1024))
 P(KnetArray{Float32,4}(3,3,1024,1024))
 P(KnetArray{Float32,4}(1,1,1024,1024))
 P(KnetArray{Float32,2}(1,1024))
 P(KnetArray{Float32,1}(1))
 P(KnetArray{Float32,2}(1024,1024))
 P(KnetArray{Float32,1}(1024))
 P(KnetArray{Float32,2}(1024,1024))
 P(KnetArray{Float32,1}(1024))

What could be the reason for this error, and how can I fix it?
Thanks

I cannot replicate this with the latest Knet version (1.4.5). Here are the other relevant packages, see if you have the same versions:

  [6710c13c] AutoGrad v1.2.4                                                                                                                
  [052768ef] CUDA v2.4.1                                                                                                                    
  [1902f260] Knet v1.4.5                                                                                                                    

The error disappeared when I updated my packages, thanks.