Error when I use tensorflow 'load_model' | بلاگ

Error when I use tensorflow 'load_model'

تعرفه تبلیغات در سایت

آخرین مطالب

امکانات وب

Vote count: 0

Recently, my instructor asked me to run DeepGO. But I got stuck in running

python2.7 predict_all.py -i data/embeddings.fa

where embeddings.fa is just a protein FASTA file.

Here is error message:

/home/zhusf/.local/lib/python2.7/site-packages/h5py/__init__.py:36: FutureWarnin
g: Conversion of the second argument of issubdtype from `float` to `np.floating`
 is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).
type`.
  from ._conv import register_converters as _register_converters
Using TensorFlow backend.
/home/zhusf/.local/lib/python2.7/site-packages/keras/engine/topology.py:1269: Us
erWarning: Update your `InputLayer` call to the Keras 2 API: `InputLayer(dtype="
int32", batch_input_shape=[None, 100..., sparse=False, name="input1")`
  return cls(**config)
/home/zhusf/.local/lib/python2.7/site-packages/keras/engine/topology.py:1269: Us
erWarning: The `dropout` argument is no longer support in `Embedding`. You can a
pply a `keras.layers.SpatialDropout1D` layer right after the `Embedding` layer t
o get the same behavior.
  return cls(**config)
/home/zhusf/.local/lib/python2.7/site-packages/keras/engine/topology.py:1269: Us
erWarning: Update your `Embedding` call to the Keras 2 API: `Embedding(embedding
s_initializer="uniform", name="embedding_1", activity_regularizer=None, trainabl
e=True, embeddings_regularizer=None, input_dtype="int32", embeddings_constraint=
None, mask_zero=False, input_dim=8001, batch_input_shape=[None, 100..., output_d
im=128, input_length=1000)`
  return cls(**config)
('Gram length:', 3)
('Vocabulary size:', 8000)
Init
Traceback (most recent call last):
  File "predict_all.py", line 138, in 
    main()
  File "/home/zhusf/.local/lib/python2.7/site-packages/click/core.py", line 722,
 in __call__
    return self.main(*args, **kwargs)
  File "/home/zhusf/.local/lib/python2.7/site-packages/click/core.py", line 697,
 in main
    rv = self.invoke(ctx)
  File "/home/zhusf/.local/lib/python2.7/site-packages/click/core.py", line 895,
 in invoke
    return ctx.invoke(self.callback, **ctx.params)
  File "/home/zhusf/.local/lib/python2.7/site-packages/click/core.py", line 535,
 in invoke
    return callback(*args, **kwargs)
  File "predict_all.py", line 31, in main
    results = predict_functions(sequences)
  File "predict_all.py", line 122, in predict_functions
    init_models()
  File "predict_all.py", line 111, in init_models
    model = load_model('data/models/model_%s.h5' % onto)
  File "/home/zhusf/.local/lib/python2.7/site-packages/keras/models.py", line 24
3, in load_model
    model = model_from_config(model_config, custom_objects=custom_objects)
  File "/home/zhusf/.local/lib/python2.7/site-packages/keras/models.py", line 31
7, in model_from_config
    return layer_module.deserialize(config, custom_objects=custom_objects)
  File "/home/zhusf/.local/lib/python2.7/site-packages/keras/layers/__init__.py", line 55, in deserialize
    printable_module_name='layer')
  File "/home/zhusf/.local/lib/python2.7/site-packages/keras/utils/generic_utils.py", line 143, in deserialize_keras_object
    list(custom_objects.items())))
  File "/home/zhusf/.local/lib/python2.7/site-packages/keras/models.py", line 1353, in from_config
    model.add(layer)
  File "/home/zhusf/.local/lib/python2.7/site-packages/keras/models.py", line 467, in add
    layer(x)
  File "/home/zhusf/.local/lib/python2.7/site-packages/keras/engine/topology.py", line 590, in __call__
    self.build(input_shapes[0])
  File "/home/zhusf/.local/lib/python2.7/site-packages/keras/layers/embeddings.py", line 105, in build
    dtype=self.dtype)
  File "/home/zhusf/.local/lib/python2.7/site-packages/keras/legacy/interfaces.py", line 91, in wrapper
    return func(*args, **kwargs)
  File "/home/zhusf/.local/lib/python2.7/site-packages/keras/engine/topology.py", line 414, in add_weight
    constraint=constraint)
  File "/home/zhusf/.local/lib/python2.7/site-packages/keras/backend/tensorflow_backend.py", line 392, in variable
    v = tf.Variable(value, dtype=tf.as_dtype(dtype), name=name)
  File "/home/zhusf/.local/lib/python2.7/site-packages/tensorflow/python/ops/variables.py", line 200, in __init__
    expected_shape=expected_shape)
  File "/home/zhusf/.local/lib/python2.7/site-packages/tensorflow/python/ops/variables.py", line 289, in _init_from_args
    initial_value, name="initial_value", dtype=dtype)
  File "/home/zhusf/.local/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 676, in convert_to_tensor
    as_ref=False)
  File "/home/zhusf/.local/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 741, in internal_convert_to_tensor
    ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
  File "/home/zhusf/.local/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 614, in _TensorTensorConversionFunction
    % (dtype.name, t.dtype.name, str(t)))
ValueError: Tensor conversion requested dtype int32 for Tensor with dtype float32: 'Tensor("embedding_1/random_uniform:0", shape=(8001, 128), dtype=float32)'

Obviously, the problem occurs in load_model.

My Python 2.7 version is 2.7.10, with tensorflow==1.2.0 and keras==2.1.3. How can I fix this problem? Thank you!

Here is predict_all.py:

#!/usr/bin/env python

import click as ck
import numpy as np
import pandas as pd
from keras.models import load_model
from subprocess import Popen, PIPE

# Lazily populated list of (keras_model, functions) pairs; filled by init_models().
models = list()
# Ontology suffixes of the model/pickle files under data/models/ (cellular
# component, molecular function, biological process).
funcs = ['cc', 'mf', 'bp']


@ck.command()
@ck.option('--in_file', '-i', help='Input FASTA file', required=True)
@ck.option('--out_file', '-o', default='results.tsv', help='Output result file')
def main(in_file, out_file):
    """Predict GO functions for every protein sequence in *in_file*.

    Loads the n-gram vocabulary and protein embeddings, reads the FASTA
    input, runs all models via predict_functions() and writes a
    tab-separated result table to *out_file*.
    """
    ngram_df = pd.read_pickle('data/models/ngrams.pkl')
    global embed_df
    embed_df = pd.read_pickle('data/graph_new_embeddings.pkl')
    global vocab, gram_len
    # n-gram -> 1-based index (0 is reserved for padding in get_data()).
    vocab = {gram: key + 1 for key, gram in enumerate(ngram_df['ngrams'])}
    # All n-grams share one length; compute it once instead of on every
    # loop iteration as before.
    gram_len = len(ngram_df['ngrams'][0])
    print('Gram length:', gram_len)
    print('Vocabulary size:', len(vocab))

    ids, sequences = read_fasta(in_file)
    results = predict_functions(sequences)
    df = pd.DataFrame({'id': ids, 'predictions': results})
    # BUG FIX: the separator was the literal letter 't' (a lost backslash);
    # a TSV output needs a real tab character.
    df.to_csv(out_file, sep='\t')


def read_fasta(filename):
    """Parse a FASTA file.

    Returns a pair ``(ids, sequences)`` where ``ids[i]`` is the first
    whitespace-delimited token of the i-th header line (without the
    leading '>') and ``sequences[i]`` is the concatenation of that
    record's sequence lines.

    BUG FIX: an empty input file previously produced a spurious
    ``([''], [''])`` record; it now yields ``([], [])``.
    """
    seqs = list()
    info = list()
    seq = ''
    inf = ''
    with open(filename) as f:
        for line in f:
            line = line.strip()
            if line.startswith('>'):
                # Flush the previous record before starting a new one.
                if seq != '':
                    seqs.append(seq)
                    info.append(inf)
                    seq = ''
                inf = line[1:].split()[0]
            else:
                seq += line
    # Flush the final record, but only if anything was actually read.
    if inf != '' or seq != '':
        seqs.append(seq)
        info.append(inf)
    return info, seqs

def get_data(sequences):
    """Encode *sequences* for the models.

    Returns ``[data, embeds]``:
      data   -- (n, 1000) float32 matrix of n-gram vocabulary indices,
                zero-padded past the end of each sequence.
      embeds -- (n, 256) float32 matrix of network embeddings taken from
                each sequence's best blastp hit (all zeros when no hit).

    Relies on the module globals ``embed_df``, ``vocab`` and ``gram_len``
    set up by main(); requires the ``blastp`` binary on PATH.
    """
    n = len(sequences)
    data = np.zeros((n, 1000), dtype=np.float32)
    embeds = np.zeros((n, 256), dtype=np.float32)

    # universal_newlines makes the pipes text streams, so the same write()
    # calls work on both Python 2 and Python 3.
    p = Popen(['blastp', '-db', 'data/embeddings.fa',
               '-max_target_seqs', '1', '-num_threads', '128',
               '-outfmt', '6 qseqid sseqid'],
              stdin=PIPE, stdout=PIPE, universal_newlines=True)
    for i in range(n):
        # BUG FIX: the record separators were literal 'n' characters
        # (lost backslashes); FASTA records need real newlines.
        p.stdin.write('>' + str(i) + '\n' + sequences[i] + '\n')
    p.stdin.close()

    prot_ids = {}
    if p.wait() == 0:
        for line in p.stdout:
            print(line)
            # BUG FIX: BLAST outfmt 6 columns are tab-separated, not
            # separated by the letter 't'.
            it = line.strip().split('\t')
            prot_ids[int(it[0])] = it[1]
    prots = embed_df[embed_df['accessions'].isin(set(prot_ids.values()))]
    embeds_dict = {}
    for _, row in prots.iterrows():
        embeds_dict[row['accessions']] = row['embeddings']

    # .items() (not the Python-2-only .iteritems()) keeps this portable.
    for i, prot_id in prot_ids.items():
        embeds[i, :] = embeds_dict[prot_id]

    for i in range(n):
        seq = sequences[i]
        # Cap at the 1000-column buffer so long sequences cannot raise an
        # IndexError; extra n-grams beyond position 1000 are dropped.
        for j in range(min(len(seq) - gram_len + 1, 1000)):
            data[i, j] = vocab[seq[j: (j + gram_len)]]
    return [data, embeds]


def predict(data, model, functions, threshold, batch_size=1):
    """Run *model* on *data* and return, for each sample, the list of
    entries of *functions* whose predicted score is >= *threshold*.

    ``data`` is the [ngram_matrix, embedding_matrix] pair produced by
    get_data(); ``data[0].shape[0]`` is the number of samples.
    Uses range() instead of the Python-2-only xrange() so the module
    also runs under Python 3.
    """
    n = data[0].shape[0]
    predictions = model.predict(data, batch_size=batch_size)
    result = list()
    for i in range(n):
        pred = (predictions[i] >= threshold).astype('int32')
        # Collect the function labels whose score cleared the threshold.
        result.append([functions[j]
                       for j in range(len(functions))
                       if pred[j] == 1])
    return result


def init_models(conf=None, **kwargs):
    """Load one Keras model per ontology in ``funcs`` (cc, mf, bp).

    Appends a ``(model, functions)`` pair to the module-level ``models``
    list for each ontology, where ``functions`` is the pandas Series of
    GO terms that model predicts. ``conf``/``kwargs`` are accepted but
    currently unused (kept for interface compatibility).
    """
    print('Init')
    global models
    for onto in funcs:
        model = load_model('data/models/model_%s.h5' % onto)
        df = pd.read_pickle('data/models/%s.pkl' % onto)
        functions = df['functions']
        models.append((model, functions))
        # print() call form keeps this valid on both Python 2 and 3
        # (the original used a Python-2-only print statement).
        print('Model %s initialized.' % onto)


def predict_functions(sequences, threshold=0.3):
    """Predict GO terms for each sequence across all loaded models.

    Lazily initializes the models on first use, then merges the
    per-ontology predictions. Returns a list (one entry per sequence)
    of GO-term lists whose score is >= *threshold*.
    """
    if not models:
        init_models()
    data = get_data(sequences)
    n = len(sequences)
    result = [[] for _ in range(n)]
    for i, (model, functions) in enumerate(models):
        # print() call form keeps this valid on both Python 2 and 3.
        print('Running predictions for model %s' % funcs[i])
        res = predict(data, model, functions, threshold)
        for j in range(n):
            result[j] += res[j]
    return result


if __name__ == '__main__':
    # Entry point: click parses the -i/-o command-line options.
    main()
asked 32 secs ago
Lizhi Liu

...
نویسنده : استخدام کار بازدید : 6 تاريخ : سه شنبه 29 اسفند 1396 ساعت: 8:52