chengchingwen / Transformers.jl

Julia Implementation of Transformer models

gpt_neo model functor throws UndefVarError: LocalMask not defined

os-esh opened this issue · comments

commented

I tried to generate text with gpt_neo, but calling the model functor throws an UndefVarError about LocalMask.

Code for downloading and calling the model:

using Transformers                  # core package
using Transformers.HuggingFace      # load_config, load_model, load_tokenizer
using Transformers.TextEncoders     # encode

model_name = "gpt_neo"
task_type = "Model"
hgf_model_name = "EleutherAI/gpt-neo-2.7B"

cfg = load_config(hgf_model_name; cache = false)
model = load_model(model_name, hgf_model_name, task_type; config = cfg)
textenc = load_tokenizer(hgf_model_name; config = cfg)

prompt = "There is some text"
encoded = encode(textenc, prompt).token   # keep only the token array
input = (; token = encoded)
model(input)                              # throws UndefVarError: LocalMask not defined
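
Note that only the token field is forwarded, so the attention op receives mask = nothing (visible in the stacktrace below). For reference, a variant that passes the full encoder output, including the attention_mask produced by the tokenizer pipeline, would look like this (untested, same model and textenc as above):

encoded_full = encode(textenc, prompt)   # NamedTuple with token and attention_mask
model(encoded_full)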

Part of error stacktrace:

ERROR: UndefVarError: LocalMask not defined
Stacktrace:
  [1] get_attention_func_args(op::Transformers.Layers.LocalCausalMultiheadQKVDotAttenOp{Nothing}, q::Array{Float32, 3}, k::Array{Float32, 3}, v::Array{Float32, 3}, mask::Nothing)
    @ Transformers.Layers ~/.julia/packages/Transformers/mTPRs/src/layers/attention_op.jl:129
  [2] (::Transformers.Layers.LocalCausalMultiheadQKVDotAttenOp{Nothing})(::Array{Float32, 3}, ::Vararg{Any})
    @ NeuralAttentionlib ~/.julia/packages/NeuralAttentionlib/mh5Es/src/types.jl:9
  [3] apply_attention_op(op::Transformers.Layers.LocalCausalMultiheadQKVDotAttenOp{Nothing}, nt::NamedTuple{(:hidden_state, :memory), Tuple{Tuple{Array{Float32, 3}, Array{Float32, 3}, Array{Float32, 3}}, GPT2TextEncoder{Transformers.TextEncoders.TextTokenizer{TextEncodeBase.MatchTokenization{TextEncodeBase.CodeNormalizer{BytePairEncoding.BPETokenization{BytePairEncoding.GPT2Tokenization, BytePairEncoding.CachedBPE{BytePairEncoding.BPE, Dict{String, Vector{String}}}}, TextEncodeBase.CodeMap{UInt8, UInt16}}}}, Vocab{String, StaticArraysCore.SizedVector{50257, String, Vector{String}}}, Pipelines{Tuple{Pipeline{:token, FuncPipelines.ApplyN{1, Base.Fix1{typeof(TextEncodeBase.nestedcall), typeof(Transformers.TextEncoders.string_getvalue)}}}, Pipeline{:token, FuncPipelines.ApplyN{2, FuncPipelines.ApplySyms{:token, typeof(Transformers.TextEncoders.grouping_sentence)}}}, Pipeline{:token, FuncPipelines.ApplyN{2, FuncPipelines.ApplySyms{:token, Base.Fix1{TextEncodeBase.SequenceTemplate{String, Tuple{TextEncodeBase.RepeatedTerm{String, Tuple{TextEncodeBase.InputTerm{String}}}}}, Val{1}}}}}, Pipeline{:attention_mask, FuncPipelines.ApplyN{2, FuncPipelines.ApplySyms{:token, ComposedFunction{Type{NeuralAttentionlib.RevLengthMask}, FuncPipelines.FixRest{typeof(Transformers.TextEncoders.getlengths), Tuple{Int64}}}}}}, Pipeline{:token, FuncPipelines.ApplyN{2, FuncPipelines.ApplySyms{:token, FuncPipelines.FixRest{typeof(TextEncodeBase.trunc_and_pad), Tuple{Int64, String, Symbol, Symbol}}}}}, Pipeline{:token, FuncPipelines.ApplyN{2, FuncPipelines.ApplySyms{:token, typeof(TextEncodeBase.nested2batch)}}}, PipeGet{(:token, :attention_mask)}}}, TextEncodeBase.CodeMap{UInt8, UInt16}}}})
    @ Transformers.Layers ~/.julia/packages/Transformers/mTPRs/src/layers/attention_op.jl:29
  [4] apply_on_namedtuple
    @ ~/.julia/packages/Transformers/mTPRs/src/layers/attention_op.jl:208 [inlined]
  [5] (::Transformers.Layers.SelfAttention{Transformers.Layers.LocalCausalMultiheadQKVDotAttenOp{Nothing}, Transformers.Layers.Fork{Tuple{Transformers.Layers.Dense{Nothing, Matrix{Float32}, Nothing}, Transformers.Layers.Dense{Nothing, Matrix{Float32}, Nothing}, Transformers.Layers.Dense{Nothing, Matrix{Float32}, Nothing}}}, Transformers.Layers.Dense{Nothing, Matrix{Float32}, Vector{Float32}}})(nt::NamedTuple{(:hidden_state, :memory), Tuple{Array{Float32, 3}, GPT2TextEncoder{Transformers.TextEncoders.TextTokenizer{TextEncodeBase.MatchTokenization{TextEncodeBase.CodeNormalizer{BytePairEncoding.BPETokenization{BytePairEncoding.GPT2Tokenization, BytePairEncoding.CachedBPE{BytePairEncoding.BPE, Dict{String, Vector{String}}}}, TextEncodeBase.CodeMap{UInt8, UInt16}}}}, Vocab{String, StaticArraysCore.SizedVector{50257, String, Vector{String}}}, Pipelines{Tuple{Pipeline{:token, FuncPipelines.ApplyN{1, Base.Fix1{typeof(TextEncodeBase.nestedcall), typeof(Transformers.TextEncoders.string_getvalue)}}}, Pipeline{:token, FuncPipelines.ApplyN{2, FuncPipelines.ApplySyms{:token, typeof(Transformers.TextEncoders.grouping_sentence)}}}, Pipeline{:token, FuncPipelines.ApplyN{2, FuncPipelines.ApplySyms{:token, Base.Fix1{TextEncodeBase.SequenceTemplate{String, Tuple{TextEncodeBase.RepeatedTerm{String, Tuple{TextEncodeBase.InputTerm{String}}}}}, Val{1}}}}}, Pipeline{:attention_mask, FuncPipelines.ApplyN{2, FuncPipelines.ApplySyms{:token, ComposedFunction{Type{NeuralAttentionlib.RevLengthMask}, FuncPipelines.FixRest{typeof(Transformers.TextEncoders.getlengths), Tuple{Int64}}}}}}, Pipeline{:token, FuncPipelines.ApplyN{2, FuncPipelines.ApplySyms{:token, FuncPipelines.FixRest{typeof(TextEncodeBase.trunc_and_pad), Tuple{Int64, String, Symbol, Symbol}}}}}, Pipeline{:token, FuncPipelines.ApplyN{2, FuncPipelines.ApplySyms{:token, typeof(TextEncodeBase.nested2batch)}}}, PipeGet{(:token, :attention_mask)}}}, TextEncodeBase.CodeMap{UInt8, UInt16}}}})
    @ Transformers.Layers ~/.julia/packages/Transformers/mTPRs/src/layers/layer.jl:137
  [6] apply_on_namedtuple
    @ ~/.julia/packages/Transformers/mTPRs/src/layers/architecture.jl:80 [inlined]
  [7] DropoutLayer
    @ ~/.julia/packages/Transformers/mTPRs/src/layers/layer.jl:16 [inlined]
  [8] apply_on_namedtuple
    @ ~/.julia/packages/Transformers/mTPRs/src/layers/architecture.jl:80 [inlined]
  [9] (::Transformers.Layers.PreNormResidual{Transformers.Layers.DropoutLayer{Transformers.Layers.SelfAttention{Transformers.Layers.LocalCausalMultiheadQKVDotAttenOp{Nothing}, Transformers.Layers.Fork{Tuple{Transformers.Layers.Dense{Nothing, Matrix{Float32}, Nothing}, Transformers.Layers.Dense{Nothing, Matrix{Float32}, Nothing}, Transformers.Layers.Dense{Nothing, Matrix{Float32}, Nothing}}}, Transformers.Layers.Dense{Nothing, Matrix{Float32}, Vector{Float32}}}, Nothing}, Transformers.Layers.LayerNorm{Vector{Float32}, Vector{Float32}, Float32}})(nt::NamedTuple{(:hidden_state, :memory), Tuple{Array{Float32, 3}, GPT2TextEncoder{Transformers.TextEncoders.TextTokenizer{TextEncodeBase.MatchTokenization{TextEncodeBase.CodeNormalizer{BytePairEncoding.BPETokenization{BytePairEncoding.GPT2Tokenization, BytePairEncoding.CachedBPE{BytePairEncoding.BPE, Dict{String, Vector{String}}}}, TextEncodeBase.CodeMap{UInt8, UInt16}}}}, Vocab{String, StaticArraysCore.SizedVector{50257, String, Vector{String}}}, Pipelines{Tuple{Pipeline{:token, FuncPipelines.ApplyN{1, Base.Fix1{typeof(TextEncodeBase.nestedcall), typeof(Transformers.TextEncoders.string_getvalue)}}}, Pipeline{:token, FuncPipelines.ApplyN{2, FuncPipelines.ApplySyms{:token, typeof(Transformers.TextEncoders.grouping_sentence)}}}, Pipeline{:token, FuncPipelines.ApplyN{2, FuncPipelines.ApplySyms{:token, Base.Fix1{TextEncodeBase.SequenceTemplate{String, Tuple{TextEncodeBase.RepeatedTerm{String, Tuple{TextEncodeBase.InputTerm{String}}}}}, Val{1}}}}}, Pipeline{:attention_mask, FuncPipelines.ApplyN{2, FuncPipelines.ApplySyms{:token, ComposedFunction{Type{NeuralAttentionlib.RevLengthMask}, FuncPipelines.FixRest{typeof(Transformers.TextEncoders.getlengths), Tuple{Int64}}}}}}, Pipeline{:token, FuncPipelines.ApplyN{2, FuncPipelines.ApplySyms{:token, FuncPipelines.FixRest{typeof(TextEncodeBase.trunc_and_pad), Tuple{Int64, String, Symbol, Symbol}}}}}, Pipeline{:token, FuncPipelines.ApplyN{2, FuncPipelines.ApplySyms{:token, typeof(TextEncodeBase.nested2batch)}}}, PipeGet{(:token, :attention_mask)}}}, TextEncodeBase.CodeMap{UInt8, UInt16}}}})
    @ Transformers.Layers ~/.julia/packages/Transformers/mTPRs/src/layers/layer.jl:69
 [10] apply_on_namedtuple
    @ ~/.julia/packages/Transformers/mTPRs/src/layers/architecture.jl:80 [inlined]
 [11] (::PreNormTransformerBlock{Transformers.Layers.DropoutLayer{Transformers.Layers.SelfAttention{Transformers.Layers.LocalCausalMultiheadQKVDotAttenOp{Nothing}, Transformers.Layers.Fork{Tuple{Transformers.Layers.Dense{Nothing, Matrix{Float32}, Nothing}, Transformers.Layers.Dense{Nothing, Matrix{Float32}, Nothing}, Transformers.Layers.Dense{Nothing, Matrix{Float32}, Nothing}}}, Transformers.Layers.Dense{Nothing, Matrix{Float32}, Vector{Float32}}}, Nothing}, Transformers.Layers.LayerNorm{Vector{Float32}, Vector{Float32}, Float32}, Transformers.Layers.DropoutLayer{Transformers.Layers.Chain{Tuple{Transformers.Layers.Dense{typeof(gelu), Matrix{Float32}, Vector{Float32}}, Transformers.Layers.Dense{Nothing, Matrix{Float32}, Vector{Float32}}}}, Nothing}, Transformers.Layers.LayerNorm{Vector{Float32}, Vector{Float32}, Float32}})(nt::NamedTuple{(:hidden_state, :memory), Tuple{Array{Float32, 3}, GPT2TextEncoder{Transformers.TextEncoders.TextTokenizer{TextEncodeBase.MatchTokenization{TextEncodeBase.CodeNormalizer{BytePairEncoding.BPETokenization{BytePairEncoding.GPT2Tokenization, BytePairEncoding.CachedBPE{BytePairEncoding.BPE, Dict{String, Vector{String}}}}, TextEncodeBase.CodeMap{UInt8, UInt16}}}}, Vocab{String, StaticArraysCore.SizedVector{50257, String, Vector{String}}}, Pipelines{Tuple{Pipeline{:token, FuncPipelines.ApplyN{1, Base.Fix1{typeof(TextEncodeBase.nestedcall), typeof(Transformers.TextEncoders.string_getvalue)}}}, Pipeline{:token, FuncPipelines.ApplyN{2, FuncPipelines.ApplySyms{:token, typeof(Transformers.TextEncoders.grouping_sentence)}}}, Pipeline{:token, FuncPipelines.ApplyN{2, FuncPipelines.ApplySyms{:token, Base.Fix1{TextEncodeBase.SequenceTemplate{String, Tuple{TextEncodeBase.RepeatedTerm{String, Tuple{TextEncodeBase.InputTerm{String}}}}}, Val{1}}}}}, Pipeline{:attention_mask, FuncPipelines.ApplyN{2, FuncPipelines.ApplySyms{:token, ComposedFunction{Type{NeuralAttentionlib.RevLengthMask}, FuncPipelines.FixRest{typeof(Transformers.TextEncoders.getlengths), Tuple{Int64}}}}}}, Pipeline{:token, FuncPipelines.ApplyN{2, FuncPipelines.ApplySyms{:token, FuncPipelines.FixRest{typeof(TextEncodeBase.trunc_and_pad), Tuple{Int64, String, Symbol, Symbol}}}}}, Pipeline{:token, FuncPipelines.ApplyN{2, FuncPipelines.ApplySyms{:token, typeof(TextEncodeBase.nested2batch)}}}, PipeGet{(:token, :attention_mask)}}}, TextEncodeBase.CodeMap{UInt8, UInt16}}}})
    @ Transformers.Layers ~/.julia/packages/Transformers/mTPRs/src/layers/layer.jl:44
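
In case it helps narrow things down: the failing frame is get_attention_func_args in src/layers/attention_op.jl:129, and the undefined name LocalMask looks like the mask type from NeuralAttentionlib (the same package that provides RevLengthMask in the tokenizer pipeline above). A rough, untested sketch of a temporary workaround, assuming the only problem is that the binding is not visible inside Transformers.Layers:

# Untested workaround sketch: make NeuralAttentionlib's LocalMask binding
# visible inside Transformers.Layers, where the UndefVarError is raised.
using NeuralAttentionlib
local_mask = isdefined(NeuralAttentionlib, :LocalMask) ?
    NeuralAttentionlib.LocalMask : NeuralAttentionlib.Masks.LocalMask
Transformers.Layers.eval(:(const LocalMask = $local_mask))
model(input)   # retry the forward pass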
commented

Thanks!