GPU compilation of #25(CuArrays.CuKernelState, CUDAnative.CuDeviceArray{ForwardDiff.Dual{Nothing,Float32,2},2,CUDAnative.AS.Global}, Base.Broadcast.Broadcasted{Nothing,Tuple{Base.OneTo{Int64},Base.OneTo{Int64}},Zygote.var"#1596#1599"{typeof(-)},Tuple{Base.Broadcast.Extruded{CUDAnative.CuDeviceArray{Float32,2,CUDAnative.AS.Global},Tuple{Bool,Bool},Tuple{Int64,Int64}},Base.Broadcast.Extruded{DiffEqArray{Float32,2,Array{CuArrays.CuArray{Float32,1,Nothing},1},StepRangeLen{Float32,Float64,Float64}},Tuple{Bool,Bool},Tuple{Int64,Int64}}}}) failed
KernelError: recursion is currently not supported

Try inspecting the generated code with any of the @device_code_... macros.

Stacktrace:
 [1] _nextind_str at strings/string.jl:140
 [2] nextind at strings/string.jl:136
 [3] _nextind_str at strings/string.jl:140
 [4] _split at strings/util.jl:325
 [5] env_override_minlevel at logging.jl:419
 [6] current_logger_for_env at logging.jl:383
 [7] assertscalar at /root/.julia/packages/GPUArrays/1wgPO/src/indexing.jl:13
 [8] #25 at /root/.julia/packages/GPUArrays/1wgPO/src/broadcast.jl:49
(::CUDAnative.var"#hook_emit_function#94"{CUDAnative.CompilerJob,Array{Core.MethodInstance,1}})(::Core.MethodInstance, ::Core.CodeInfo, ::UInt64) at irgen.jl:102
compile_method_instance(::CUDAnative.CompilerJob, ::Core.MethodInstance, ::UInt64) at irgen.jl:153
macro expansion at TimerOutput.jl:228 [inlined]
irgen(::CUDAnative.CompilerJob, ::Core.MethodInstance, ::UInt64) at irgen.jl:167
macro expansion at TimerOutput.jl:228 [inlined]
macro expansion at driver.jl:99 [inlined]
macro expansion at TimerOutput.jl:228 [inlined]
#codegen#151(::Bool, ::Bool, ::Bool, ::Bool, ::Bool, ::typeof(CUDAnative.codegen), ::Symbol, ::CUDAnative.CompilerJob) at driver.jl:98
#codegen at none:0 [inlined]
#compile#150(::Bool, ::Bool, ::Bool, ::Bool, ::Bool, ::typeof(CUDAnative.compile), ::Symbol, ::CUDAnative.CompilerJob) at driver.jl:47
#compile#149 at none:0 [inlined]
#compile at none:0 [inlined]
#compile at none:0 [inlined]
macro expansion at execution.jl:393 [inlined]
#cufunction#195(::Nothing, ::Base.Iterators.Pairs{Union{},Union{},Tuple{},NamedTuple{(),Tuple{}}}, ::typeof(CUDAnative.cufunction), ::GPUArrays.var"#25#26", ::Type{Tuple{CuArrays.CuKernelState,CUDAnative.CuDeviceArray{ForwardDiff.Dual{Nothing,Float32,2},2,CUDAnative.AS.Global},Base.Broadcast.Broadcasted{Nothing,Tuple{Base.OneTo{Int64},Base.OneTo{Int64}},Zygote.var"#1596#1599"{typeof(-)},Tuple{Base.Broadcast.Extruded{CUDAnative.CuDeviceArray{Float32,2,CUDAnative.AS.Global},Tuple{Bool,Bool},Tuple{Int64,Int64}},Base.Broadcast.Extruded{DiffEqArray{Float32,2,Array{CuArrays.CuArray{Float32,1,Nothing},1},StepRangeLen{Float32,Float64,Float64}},Tuple{Bool,Bool},Tuple{Int64,Int64}}}}}}) at execution.jl:360
cufunction(::Function, ::Type) at execution.jl:360
macro expansion at execution.jl:179 [inlined]
macro expansion at gcutils.jl:91 [inlined]
macro expansion at execution.jl:176 [inlined]
_gpu_call(::CuArrays.CuArrayBackend, ::Function, ::CuArrays.CuArray{ForwardDiff.Dual{Nothing,Float32,2},2,Nothing}, ::Tuple{CuArrays.CuArray{ForwardDiff.Dual{Nothing,Float32,2},2,Nothing},Base.Broadcast.Broadcasted{Nothing,Tuple{Base.OneTo{Int64},Base.OneTo{Int64}},Zygote.var"#1596#1599"{typeof(-)},Tuple{Base.Broadcast.Extruded{CuArrays.CuArray{Float32,2,Nothing},Tuple{Bool,Bool},Tuple{Int64,Int64}},Base.Broadcast.Extruded{DiffEqArray{Float32,2,Array{CuArrays.CuArray{Float32,1,Nothing},1},StepRangeLen{Float32,Float64,Float64}},Tuple{Bool,Bool},Tuple{Int64,Int64}}}}}, ::Tuple{Tuple{Int64},Tuple{Int64}}) at gpuarray_interface.jl:62
gpu_call(::Function, ::CuArrays.CuArray{ForwardDiff.Dual{Nothing,Float32,2},2,Nothing}, ::Tuple{CuArrays.CuArray{ForwardDiff.Dual{Nothing,Float32,2},2,Nothing},Base.Broadcast.Broadcasted{Nothing,Tuple{Base.OneTo{Int64},Base.OneTo{Int64}},Zygote.var"#1596#1599"{typeof(-)},Tuple{Base.Broadcast.Extruded{CuArrays.CuArray{Float32,2,Nothing},Tuple{Bool,Bool},Tuple{Int64,Int64}},Base.Broadcast.Extruded{DiffEqArray{Float32,2,Array{CuArrays.CuArray{Float32,1,Nothing},1},StepRangeLen{Float32,Float64,Float64}},Tuple{Bool,Bool},Tuple{Int64,Int64}}}}}, ::Int64) at abstract_gpu_interface.jl:151
gpu_call at abstract_gpu_interface.jl:128 [inlined]
copyto! at broadcast.jl:48 [inlined]
copyto! at broadcast.jl:863 [inlined]
copy(::Base.Broadcast.Broadcasted{Base.Broadcast.ArrayStyle{CuArrays.CuArray},Tuple{Base.OneTo{Int64},Base.OneTo{Int64}},Zygote.var"#1596#1599"{typeof(-)},Tuple{CuArrays.CuArray{Float32,2,Nothing},DiffEqArray{Float32,2,Array{CuArrays.CuArray{Float32,1,Nothing},1},StepRangeLen{Float32,Float64,Float64}}}}) at broadcast.jl:839
materialize at broadcast.jl:819 [inlined]
broadcast_forward at broadcast.jl:168 [inlined]
adjoint at broadcast.jl:178 [inlined]
_pullback at adjoint.jl:47 [inlined]
adjoint at lib.jl:153 [inlined]
_pullback at adjoint.jl:47 [inlined]
broadcasted at broadcast.jl:1237 [inlined]
_pullback(::Zygote.Context, ::typeof(Base.Broadcast.broadcasted), ::typeof(-), ::CuArrays.CuArray{Float32,...
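
The KernelError above suggests inspecting the generated device code with the @device_code_... macros. Below is a minimal sketch of what that inspection might look like, assuming a working CuArrays/CUDAnative setup on the same package versions as in the trace; the two small CuArrays are stand-ins for the operands of the failing broadcast (the real reproduction would broadcast a CuArray against a DiffEqArray inside a Zygote pullback), so treat it as an illustration rather than an exact reproduction.

using CuArrays, CUDAnative

# Stand-in operands; in the failing case one side is a DiffEqArray wrapping
# an Array of CuArrays, which is what forces the scalar-indexing fallback
# (assertscalar) seen in frames [1]-[8] of the trace.
a = cu(rand(Float32, 4, 4))
b = cu(rand(Float32, 4, 4))

# Print the type-inferred device code for every kernel compiled while the
# expression runs; substituting the broadcast from the trace here would show
# where the string/logging machinery reached via assertscalar enters the
# kernel, which is what triggers "recursion is currently not supported".
CUDAnative.@device_code_warntype a .- b

CUDAnative also provides @device_code_llvm, @device_code_ptx, and @device_code_sass for lower-level views of the same kernel.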