import six
import tensorflow as tf
from tensorflow.core.protobuf import saved_model_pb2
-from tensorflow.python.platform import gfile

import easy_rec
from easy_rec.python.builders import strategy_builder
@@ -240,27 +239,27 @@ def _metric_cmp_fn(best_eval_result, current_eval_result):

def _check_model_dir(model_dir, continue_train):
  if not continue_train:
-    if not gfile.IsDirectory(model_dir):
-      gfile.MakeDirs(model_dir)
+    if not tf.gfile.IsDirectory(model_dir):
+      tf.gfile.MakeDirs(model_dir)
    else:
-      assert len(gfile.Glob(model_dir + '/model.ckpt-*.meta')) == 0, \
+      assert len(tf.gfile.Glob(model_dir + '/model.ckpt-*.meta')) == 0, \
          'model_dir[=%s] already exists and not empty(if you ' \
          'want to continue train on current model_dir please ' \
          'delete dir %s or specify --continue_train[internal use only])' % (
              model_dir, model_dir)
  else:
-    if not gfile.IsDirectory(model_dir):
+    if not tf.gfile.IsDirectory(model_dir):
      logging.info('%s does not exists, create it automatically' % model_dir)
-      gfile.MakeDirs(model_dir)
+      tf.gfile.MakeDirs(model_dir)


def _get_ckpt_path(pipeline_config, checkpoint_path):
  if checkpoint_path != '' and checkpoint_path is not None:
-    if gfile.IsDirectory(checkpoint_path):
+    if tf.gfile.IsDirectory(checkpoint_path):
      ckpt_path = estimator_utils.latest_checkpoint(checkpoint_path)
    else:
      ckpt_path = checkpoint_path
-  elif gfile.IsDirectory(pipeline_config.model_dir):
+  elif tf.gfile.IsDirectory(pipeline_config.model_dir):
    ckpt_path = estimator_utils.latest_checkpoint(pipeline_config.model_dir)
    logging.info('checkpoint_path is not specified, '
                 'will use latest checkpoint %s from %s' %
@@ -284,7 +283,8 @@ def train_and_evaluate(pipeline_config_path, continue_train=False):
  Returns:
    None, the model will be saved into pipeline_config.model_dir
  """
-  assert gfile.Exists(pipeline_config_path), 'pipeline_config_path not exists'
+  assert tf.gfile.Exists(
+      pipeline_config_path), 'pipeline_config_path not exists'
  pipeline_config = config_util.get_configs_from_pipeline_file(
      pipeline_config_path)

@@ -323,7 +323,7 @@ def _train_and_evaluate_impl(pipeline_config,
  if estimator_utils.is_chief():
    _check_model_dir(pipeline_config.model_dir, continue_train)
    config_util.save_pipeline_config(pipeline_config, pipeline_config.model_dir)
-    with gfile.GFile(version_file, 'w') as f:
+    with tf.gfile.GFile(version_file, 'w') as f:
      f.write(easy_rec.__version__ + '\n')

  train_steps = None
@@ -509,7 +509,7 @@ def evaluate(pipeline_config,
  model_dir = pipeline_config.model_dir
  eval_result_file = os.path.join(model_dir, eval_result_filename)
  logging.info('save eval result to file %s' % eval_result_file)
-  with gfile.GFile(eval_result_file, 'w') as ofile:
+  with tf.gfile.GFile(eval_result_file, 'w') as ofile:
    result_to_write = {}
    for key in sorted(eval_result):
      # skip logging binary data
@@ -562,10 +562,10 @@ def distribute_evaluate(pipeline_config,
    return eval_result
  model_dir = get_model_dir_path(pipeline_config)
  eval_tmp_results_dir = os.path.join(model_dir, 'distribute_eval_tmp_results')
-  if not gfile.IsDirectory(eval_tmp_results_dir):
+  if not tf.gfile.IsDirectory(eval_tmp_results_dir):
    logging.info('create eval tmp results dir {}'.format(eval_tmp_results_dir))
-    gfile.MakeDirs(eval_tmp_results_dir)
-  assert gfile.IsDirectory(
+    tf.gfile.MakeDirs(eval_tmp_results_dir)
+  assert tf.gfile.IsDirectory(
      eval_tmp_results_dir), 'tmp results dir not create success.'
  os.environ['eval_tmp_results_dir'] = eval_tmp_results_dir

@@ -679,7 +679,7 @@ def distribute_evaluate(pipeline_config,
  if cur_job_name == 'master':
    print('eval_result = ', eval_result)
    logging.info('eval_result = {0}'.format(eval_result))
-    with gfile.GFile(eval_result_file, 'w') as ofile:
+    with tf.gfile.GFile(eval_result_file, 'w') as ofile:
      result_to_write = {'eval_method': 'distribute'}
      for key in sorted(eval_result):
        # skip logging binary data
@@ -766,8 +766,8 @@ def export(export_dir,
    AssertionError, if:
      * pipeline_config_path does not exist
  """
-  if not gfile.Exists(export_dir):
-    gfile.MakeDirs(export_dir)
+  if not tf.gfile.Exists(export_dir):
+    tf.gfile.MakeDirs(export_dir)

  pipeline_config = config_util.get_configs_from_pipeline_file(pipeline_config)
  if pipeline_config.fg_json_path:
@@ -830,10 +830,10 @@ def export(export_dir,
    ]
    export_ts = export_ts[-1]
    saved_pb_path = os.path.join(final_export_dir, 'saved_model.pb')
-    with gfile.GFile(saved_pb_path, 'rb') as fin:
+    with tf.gfile.GFile(saved_pb_path, 'rb') as fin:
      saved_model.ParseFromString(fin.read())
    saved_model.meta_graphs[0].meta_info_def.meta_graph_version = export_ts
-    with gfile.GFile(saved_pb_path, 'wb') as fout:
+    with tf.gfile.GFile(saved_pb_path, 'wb') as fout:
      fout.write(saved_model.SerializeToString())

  logging.info('model has been exported to %s successfully' % final_export_dir)
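
Every change in this diff swaps the private `tensorflow.python.platform.gfile` import for the public `tf.gfile` alias; the call sites themselves are otherwise unchanged. Below is a minimal standalone sketch of the migrated file-system calls, assuming TensorFlow 1.x (where `tf.gfile` is available); the helper name and paths are hypothetical and not part of EasyRec.

```python
import os

import tensorflow as tf


def prepare_model_dir(model_dir, version):
  """Create model_dir if missing; refuse to reuse one that holds checkpoints."""
  if not tf.gfile.IsDirectory(model_dir):
    tf.gfile.MakeDirs(model_dir)
  else:
    # Same guard as _check_model_dir above: an existing directory must not
    # already contain model.ckpt-*.meta files before a fresh run starts.
    assert len(tf.gfile.Glob(model_dir + '/model.ckpt-*.meta')) == 0, \
        'model_dir[=%s] already exists and is not empty' % model_dir
  # Mirrors the version-file write in _train_and_evaluate_impl.
  with tf.gfile.GFile(os.path.join(model_dir, 'version'), 'w') as f:
    f.write(version + '\n')


prepare_model_dir('/tmp/easy_rec_demo', '0.0.0')
```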
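The export hunk also re-stamps the written SavedModel with an export timestamp. Here is a sketch of that step in isolation, using the same `saved_model_pb2` proto imported at the top of the file; the path and timestamp arguments below are placeholders.

```python
import tensorflow as tf
from tensorflow.core.protobuf import saved_model_pb2


def stamp_meta_graph_version(saved_pb_path, export_ts):
  # Read the serialized SavedModel proto back from disk.
  saved_model = saved_model_pb2.SavedModel()
  with tf.gfile.GFile(saved_pb_path, 'rb') as fin:
    saved_model.ParseFromString(fin.read())
  # meta_graph_version is a free-form string on MetaInfoDef, so any
  # identifier (here an export timestamp) can be recorded in it.
  saved_model.meta_graphs[0].meta_info_def.meta_graph_version = export_ts
  # Write the updated proto back over the original file.
  with tf.gfile.GFile(saved_pb_path, 'wb') as fout:
    fout.write(saved_model.SerializeToString())


stamp_meta_graph_version('/tmp/export/1700000000/saved_model.pb', '1700000000')
```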