import argparse

from pyspark import SparkConf, SparkContext
from hdfs import Config


def main():
    conf = SparkConf().setAppName("binarize nifti")
    sc = SparkContext(conf=conf)
    sc.setLogLevel('ERROR')

    parser = argparse.ArgumentParser(description='Binarize images')
    parser.add_argument('threshold', type=int, help='binarization threshold')
    parser.add_argument('folder_path', type=str, help='folder path containing all of the splits')
    parser.add_argument('output_path', type=str, help='output folder path')
    parser.add_argument('num', type=int, choices=[2, 4, 6, 8], help='number of binarization operations')
    # argparse's type=bool treats any non-empty string as True, so parse the
    # flag as a string and compare it explicitly below.
    parser.add_argument('-m', '--in_memory', type=str, choices=['True', 'False'],
                        default='True', help='in-memory computation')
    args = parser.parse_args()

    nibRDD = sc.binaryFiles(args.folder_path) \
               .map(lambda x: get_data(x))

    client = Config().get_client('dev')

    if args.in_memory == 'True':
        print("Performing in-memory computations")
        # Chain num - 1 binarizations lazily, then binarize once more and save.
        for i in range(args.num - 1):
            nibRDD = nibRDD.map(lambda x: binarize(x, args.threshold))
        nibRDD = nibRDD.map(lambda x: binarize_and_save(x, args.threshold, args.output_path, client)).collect()
    else:
        print("Writing intermediary results to disk and loading from disk")
        # Materialize the first pass to disk, then re-read and overwrite the
        # same folder for each remaining binarization.
        binRDD = nibRDD.map(lambda x: binarize_and_save(x, args.threshold, args.output_path + "1", client)).collect()
        for i in range(args.num - 1):
            binRDD = sc.binaryFiles(args.output_path + "1") \
                       .map(lambda x: get_data(x)) \
                       .map(lambda x: binarize_and_save(x, args.threshold, args.output_path + "1", client)).collect()
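
# The helpers get_data, binarize, and binarize_and_save are not part of this
# listing. A minimal sketch of what they might look like, assuming nibabel for
# NIfTI (de)serialization and the same hdfs Config client used above; these
# implementations are illustrative, not the original ones.

import os
from io import BytesIO

import nibabel as nib
import numpy as np


def get_data(x):
    # x is a (path, bytes) pair produced by sc.binaryFiles;
    # deserialize the bytes into an in-memory Nifti1Image.
    path, data = x
    fh = nib.FileHolder(fileobj=BytesIO(data))
    img = nib.Nifti1Image.from_file_map({'header': fh, 'image': fh})
    return (os.path.basename(path), img)


def binarize(x, threshold):
    # Threshold the voxel data in memory and return a new image.
    name, img = x
    data = (img.get_fdata() > threshold).astype(np.uint8)
    return (name, nib.Nifti1Image(data, img.affine))


def binarize_and_save(x, threshold, output_path, client):
    # Binarize once more, then stream the serialized image to HDFS.
    name, img = binarize(x, threshold)
    with client.write(os.path.join(output_path, name), overwrite=True) as writer:
        writer.write(img.to_bytes())
    return name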