diff --git a/src/client.py b/src/client.py
index 65d83752f09889e0fcdd9d87dee0ea1f5354fa6c..fbb44f6b886a2bb818f4b5f611af6797ee9789cc 100755
--- a/src/client.py
+++ b/src/client.py
@@ -11,7 +11,7 @@ import logging # logging module
 # values over max (and under min) will be clipped
 MAX_X = 2400
 MAX_Y = 1200
-MIN_Z = -460  # TODO test this one!
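+# the Z range is now 0..MAX_Z instead of the old MIN_Z..0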
+MAX_Z = 469  # TODO test this one!
 
 def coord(x, y, z):
   return {"kind": "coordinate", "args": {"x": x, "y": y, "z": z}} # 返回json 嵌套对象
@@ -66,7 +66,7 @@ class FarmbotClient(object):
   def move(self, x, y, z):
     x = clip(x, 0, MAX_X)
     y = clip(y, 0, MAX_Y)
-    z = clip(z, MIN_Z, 0)
+    z = clip(z, 0, MAX_Z)
     status_ok = self._blocking_request(move_request(x, y, z)) # send the request
     logging.info("MOVE (%s,%s,%s) [%s]", x, y, z, status_ok) # log the executed "move x y z" together with the returned status
 
diff --git a/src/detect.py b/src/detect.py
index ea1c2dc85989052f919b867f52007b525aab6e11..5f3296376fb6e2aa708db354c7fd55e175fb0cd0 100644
--- a/src/detect.py
+++ b/src/detect.py
@@ -125,10 +125,6 @@ def detect(args: Namespace)-> None:
         darknet.print_detections(detections, args.ext_output)
         fps = int(1/(time.time() - prev_time))
         print("FPS: {}".format(fps))
-        if not args.dont_show:
-            cv2.imshow('Inference', resized_image)
-            if cv2.waitKey() & 0xFF == ord('q'):
-                break
         index += 1
 
 
@@ -143,11 +139,9 @@ if __name__ == "__main__":
                         help="number of images to be processed at the same time")
     parser.add_argument("--weights", default="../weights/yolov3-vattenhallen_best.weights",
                         help="yolo weights path")
-    parser.add_argument("--dont_show", action='store_true',
-                        help="windown inference display. For headless systems")
-    parser.add_argument("--ext_output", action='store_true',
+    parser.add_argument("--ext_output", action='store_true', default=True,
                         help="display bbox coordinates of detected objects")
-    parser.add_argument("--save_labels", action='store_true',
+    parser.add_argument("--save_labels", action='store_true', default=True,
                         help="save detections bbox for each image in yolo format")
     parser.add_argument("--config_file", default="../cfg/yolov3-vattenhallen-test.cfg",
                         help="path to config file")
diff --git a/src/location.py b/src/location.py
index 3cde737a785b17b3cd182438d6ec4259d08555a1..a9375b84dbbb4ded7fdec3647687408fca350369 100644
--- a/src/location.py
+++ b/src/location.py
@@ -98,12 +98,9 @@ def read_locations(locations_path: Path) -> Optional[ndarray]:
         return None
 
     number_files = len(listdir(locations_path))
-    if number_files > 1:
-        _LOG.error('More than one file of locations found the {}'.format(locations_path))
-        raise FileExistsError
-    elif number_files == 0:
-        _LOG.error('No file of locations found the {}'.format(locations_path))
-        raise FileNotFoundError
+    if number_files != 1:
+        _LOG.error('Expected exactly one locations file in {}, found {}'.format(locations_path, number_files))
+        return None
 
     locations_file = Path(locations_path, [file for file in listdir(locations_path)][0])    
     if not locations_file.suffix.lstrip('.') in _LOCATIONS_EXTENSIONS:
@@ -167,15 +164,13 @@ def cal_location(args: Namespace) -> ndarray:
     main function for this script
     '''
     cam_offset, gripper_offset = read_offsets(args.offset)
-    K_matrix = load_cam_matrix(args.camera_matrix) 
-    list_location = read_locations(args.locations) # raises an error if the file is missing
+    K_matrix = load_cam_matrix(args.camera_matrix)
+    list_location = read_locations(args.locations) 
     # iterate over each annotation file
     _LOG.info('Global coordinate calculation begins.')
     list_annotations = listdir(args.annotations)
     # sort in chronological order; specific to the filenames on Ziliang's PC, change if the naming differs
-    time_label = lambda x: int(x.split('_')[1]+x.split('_')[2]
-                              +x.split('_')[3]+x.split('_')[4])
-    list_annotations.sort(key=time_label)
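+    # plain lexicographic sort; assumes the timestamped filenames are zero-padded so name order matches time order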
+    list_annotations.sort() 
     # read annotations
     for index_photo, annotation_file in enumerate(list_annotations):
         filepath = Path(args.annotations, annotation_file)
@@ -214,6 +209,7 @@ def cal_location(args: Namespace) -> ndarray:
                                 list_location[index_photo], cam_offset, gripper_offset)
             list_global_coordinate.append([category, global_x, global_y, confidence])     
             _LOG.debug(list_global_coordinate[-1])   
+    
     _LOG.info('Global coordinate calculation is done.')
     return array(list_global_coordinate)
 
diff --git a/src/main.py b/src/main.py
index 0cae31e4380970b8f97a83b64aa79859858b1fc3..c68b26834577376f28ee6adbdeb8bf66f9ee69db 100644
--- a/src/main.py
+++ b/src/main.py
@@ -17,10 +17,10 @@ from move import *
 from detect import *
 from location import *
 
-_Log = getLogger(__name__)
+_LOG = getLogger(__name__)
 
-GRIP_Z = -200 # measure!
-SCAN_Z = -100
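+# Z heights now use the positive 0..MAX_Z scale from client.py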
+GRIP_Z = 468 # measure!
+SCAN_Z = 0
 ORIGIN_X = 0
 ORIGIN_Y = 0
 ORIGIN_Z = 0
@@ -34,13 +34,12 @@ def remove_overlap(table_coordinate:DataFrame, tolerance=50.00)->DataFrame:
     :param table_coordinate: pandas dataframe that each row corresponds to a target [class, x, y, confidence]
     :param tolerance: a distance threshold
     '''
-    # this again requires rewriting the earlier code to include the probability
     num_coordinates, num_col = table_coordinate.shape
     for i in range(num_coordinates-1):
         x, y, confidence = table_coordinate.loc[i, ['x','y', 'confidence']]
         for j in range(i+1, num_coordinates):
                 x_j, y_j, confidence_j = table_coordinate.loc[j, ['x','y', 'confidence']]
-                distance = sqrt((x-x_j)*(x-x_j)+(y-y_j)*(y-y_j))  # optimize with map?
+                distance = sqrt((float(x)-float(x_j))*(float(x)-float(x_j)) + (float(y)-float(y_j))*(float(y)-float(y_j)))  
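+                # intended de-duplication: keep only the higher-confidence detection when two targets are closer than the tolerance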
                 if distance <= tolerance:
                     if confidence < confidence_j:
                         table_coordinate.drop(i)
@@ -62,19 +61,29 @@ def remove_temp(path: Path)-> None:
 
 
 def main(args: Namespace):
+    # clean temporary files
+    remove_temp(args.input)
+    remove_temp(args.locations)
+    remove_temp(args.annotations)
     # start from the origin
-    simple_move(0, 0, 0)
+    simple_move(ORIGIN_X, ORIGIN_Y, ORIGIN_Z)
+    _LOG.info("Go back to the origin")
     # scan
-    scan(flag=False)
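+    # scan saves the photos under args.photo and writes the camera locations to args.locations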
+    scan(args.photo, args.locations, flag=False)
+    _LOG.info("Scan the planting bed")
     # detect
     detect(args)
+    _LOG.info("Detection is done")
     # calculate locations
     list_global_coordinate = cal_location(args)
+    _LOG.info("Global coordinate calculation is done.")
     # choose class
     table_global_coordinate = DataFrame(list_global_coordinate, columns=['class', 'x', 'y', 'confidence'])
     # remove overlap
+    print(table_global_coordinate)
     table_global_coordinate = remove_overlap(table_global_coordinate)
     goal_class = table_global_coordinate[table_global_coordinate['class']==args.category]
+    _LOG.info("Choose {}".format(args.category))
     # if there is no desired class of plants
     if goal_class.empty:
         _LOG.info("There is no {}".format(args.category))
@@ -89,10 +98,6 @@ def main(args: Namespace):
         # go back to the origin
         simple_move(x, y, GRIP_Z, False)
         gripper_open()
-    # clean temporary files if all the previous steps worked
-    remove_temp(args.input)
-    remove_temp(args.locations)
-    remove_temp(args.annotations)
     return
 
 
@@ -100,6 +105,14 @@ def main(args: Namespace):
 
 if __name__ == '__main__':
     parser = ArgumentParser(description="YOLOv3 detection on Farmbot")
+    # parsers for move
+    parser.add_argument(
+        '-p',
+        '--photo',
+        type=Path,
+        default="../img",
+        help='path to the directory where photos taken during scanning are saved'
+    )
     # parsers for detect
     parser.add_argument("--input", type=str, default="../img",
                         help="image source. It can be a single image, a"
@@ -110,11 +123,9 @@ if __name__ == '__main__':
                         help="number of images to be processed at the same time")
     parser.add_argument("--weights", default="../weights/yolov3-vattenhallen_best.weights",
                         help="yolo weights path")
-    parser.add_argument("--dont_show", action='store_true',
-                        help="windown inference display. For headless systems")
-    parser.add_argument("--ext_output", action='store_true',
+    parser.add_argument("--ext_output", action='store_true', default=True,
                         help="display bbox coordinates of detected objects")
-    parser.add_argument("--save_labels", action='store_true',
+    parser.add_argument("--save_labels", action='store_true', default=True,
                         help="save detections bbox for each image in yolo format")
     parser.add_argument("--config_file", default="../cfg/yolov3-vattenhallen-test.cfg",
                         help="path to config file")
@@ -126,7 +137,7 @@ if __name__ == '__main__':
     parser.add_argument(
         '-ca',
         '--category',
-        type=str,
+        type=int,
         help='Choose the class of fruits to be picked up. There are tomato, mushroom,\
         potato, carrot, beetroot, zucchini, hand'
     )
diff --git a/src/move.py b/src/move.py
index 77788f67bed9f5a62615d43ef5549ac4c917933e..3768b918c06a894d58bd0311faac64012f0eed35 100644
--- a/src/move.py
+++ b/src/move.py
@@ -39,7 +39,8 @@ class Opts:
         self.flag = flag
     
 
-def scan(img_path: Path, min_x=0, max_x=1300, min_y=0, max_y=1000, delta=500, offset=0, flag=True) -> List: # these numbers need to be re-measured
+def scan(img_path: Path, location_path: Path, # smaller delta
+         min_x=0, max_x=1300, min_y=0, max_y=1000, delta=1000, offset=0, flag=True) -> None: # these numbers need to be re-measured
     '''
     scan the bed at a certain height, moving first along the x axis, then y, in a zig-zag pattern;
     take a picture at each stop and record the camera location that corresponds to it
@@ -77,7 +78,11 @@ def scan(img_path: Path, min_x=0, max_x=1300, min_y=0, max_y=1000, delta=500, of
         client.move(x, y, _SWEEEP_HEIGHT) # move camera
         take_photo(img_path)
     client.shutdown()
-    return pts # the locations should be written to the file img/location
+    # write to img/location
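+    # format: one "x y z" line per photo, in the order the photos were taken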
+    with open(path.join(location_path, "location.txt"), 'w') as f:
+        for position in pts:
+            f.write('{} {} {}\n'.format(position[0], position[1], _SWEEEP_HEIGHT))
+    return None 
 
 
 def take_photo(img_path: Path):
@@ -124,6 +129,13 @@ if __name__ == '__main__':
         default="../img",
         help='Mode for FarmBot, 1 for simple move with an assigned destination, 2 for scanning'
     )
+    parser.add_argument(
+        '-loc',
+        '--locations',
+        type=Path,
+        default='../img/locations/',
+        help='path to the txt file containing the encoder locations that correspond to each photo'
+    )
     parser.add_argument('-v', '--verbose', action='store_true', help='Verbose mode')
     arguments = parser.parse_args()
     
@@ -138,8 +150,7 @@ if __name__ == '__main__':
         simple_move(destination_x, destination_y, destination_z, photo)
         Logger.info(f'time cost {time()-simple_move_start}')
     elif arguments.mode == 2:
-        simple_move(1000,0, -200)
-        #scan(flag=False)
+        scan(arguments.photo, arguments.locations, flag=False)
     else:
         Logger.error(f'Wrong mode number {arguments.mode}')