diff --git a/src/mio.py b/src/mio.py
index 7a1018c28bfd1f862a380ab3b21efee30983c465..0851a0d1d0defb05b8481b45ed89c8834c6526bb 100755
--- a/src/mio.py
+++ b/src/mio.py
@@ -21,7 +21,7 @@ import locale
 def parse_with_path(path, filename, repository=None):
     f = mio.filecache.loadfile(path, filename)
     tree = mio.parser.parse(f.filename, url=f.key)
-    
+
     join = None
     if tree._tag == "comps":
         # Transform comps to canonical form
@@ -89,6 +89,9 @@ class FindAndParse:
             if target.startswith("/"):
                 m = re.match("^/([^/]*)", target)
                 OK |= self.parse_with_path(p, "%s.mio" % (m.group(1)), builder, target)
+                if OK:
+                    # Only the first mio file found is considered
+                    break
                 pass
             elif target.startswith("@"):
                 filename = "base/comps.xml"
@@ -107,6 +110,9 @@ class FindAndParse:
                 pass
             else:
                 OK |= self.parse_with_path(p, "hostinfo.xml", builder, target)
+                if OK:
+                    # Only the first hostinfo.xml file found is considered
+                    break
                 pass
         
         if OK:
diff --git a/src/mio/filecache.py b/src/mio/filecache.py
index fa429aa19d97879b758c1cb67058659daf8e8cff..f5c79b14d30ff9a9698f79122b04edd1dd598505 100755
--- a/src/mio/filecache.py
+++ b/src/mio/filecache.py
@@ -7,6 +7,8 @@ import re
 import sys
 import tempfile
 import time
+from urllib.parse import urlparse, urlunparse
+
 try:
     # Python 3
     from urllib.parse import urlsplit, urlunsplit, unquote as urlunquote
@@ -25,10 +27,6 @@ except ImportError:
     import mio.urlgrabber_compat as pycurl
     pass
         
-temp = []
-cache = {}
-mirror = {}
-
 def subpath(path, *tail):
     result = []
     for p in path:
@@ -121,85 +119,129 @@ class Mirror:
         except urlgrabber.grabber.URLGrabError as e:
             raise IOError("Failed to get '%s' (%s)" % (path, e))
             raise
-            
-            
-def expand_mirror(path):
-    def unique_id(path):
-        all = set(path)
-        N = 0
-        while True:
-            N = N + 1
-            result = {}
-            test = set()
-            for p in all:
-                name = '__'.join(p.split('/')[-N:])
-                if name in test:
-                    break
-                test.update(name)
-                result[p] = name
-            break
-        return result
-                
-    class Info:
-        def __init__(self, id, path, urls):
-            self.id = id
-            self.path = path
-            self.urls = urls
-            pass
-        def __repr__(self):
-            return 'Repo(id=%s, path=%s, urls=%s)' % (self.id, 
-                                                      self.path, 
-                                                      self.urls)
         pass
-    result = []
-    id = unique_id(path)
-    for p in path:
-        result.append(Info(id[p], p, mirror[p].urls))
 
-    return result
+    def __repr__(self):
+        return 'Mirror(%s)' % self.path
 
-def cleanup():
-    for f in temp:
-        if os.path.isdir(f):
-            os.system("rm -rf %s" % f)
-        else:
-            os.remove(f)
+    pass
+
+class Loader:
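+    # Resolves relative paths against a fixed prefix on a shared Mirror.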
+
+    def __init__(self, mirror, prefix):
+        self.mirror = mirror
+        self.prefix = prefix
+        pass
+
+    def urlopen(self, path):
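+        # Join the prefix with the relative path; normpath collapses '..' segments.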
+        fullpath = os.path.normpath('%s/%s' % (self.prefix, path))
+        return self.mirror.urlopen(fullpath)
+
+    pass
         
-def createfile():
-    f = open(tempfile.mktemp(), 'wb')
-    temp.append(f.name)
-    os.chmod(f.name, 0o700)
-    return f
-
-def createdir():
-    d = tempfile.mktemp()
-    os.mkdir(d, 0o700)
-    temp.append(d)
-    return d
-    
-    
-def loadfile(path, *name):
-    key = Key(path, os.path.normpath('/'.join(name)))
-    if not key in cache:
-        if not path in mirror:
-            mirror[path] = Mirror(key.path)
+class FileCache:
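+    # Caches fetched files keyed by (path, name); one Mirror is shared per
+    # scheme+netloc, with per-path prefixes handled by Loader objects.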
+
+    def __init__(self):
+        self.loader = {}
+        self.mirror = {}
+        self.cache = {}
+        self.tempfiles = []
+        pass
+
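+    # Remove every temporary file created by createfile().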
+    def cleanup(self):
+        for f in self.tempfiles:
+            os.remove(f)
             pass
-        u = mirror[path].urlopen(key.name)
-        if u.scheme == 'file':
-            p = urlunquote(urlsplit(u.geturl()).path)
-            cache[key] = CacheEntry(p, key)
+        pass
+
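+    # Create a tracked temporary file that cleanup() deletes at exit.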
+    def createfile(self):
+        # mkstemp avoids the race in the deprecated tempfile.mktemp()
+        fd, name = tempfile.mkstemp()
+        f = os.fdopen(fd, 'wb')
+        self.tempfiles.append(name)
+        os.chmod(name, 0o700)
+        return f
+
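+    # Fetch '/'.join(name) relative to 'path', caching the resulting file.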
+    def loadfile(self, path, *name):
+        if path not in self.loader:
+            # Create a loader for a new path
+            up = urlparse(path)
+            url = urlunparse((up.scheme, up.netloc, '', '', '', ''))
+            if url not in self.mirror:
+                self.mirror[url] = Mirror(url)
+                pass
+            prefix = up.path
+            self.loader[path] = Loader(self.mirror[url], prefix)
             pass
-        else:
-            mtime = u.curl_obj.getinfo(pycurl.INFO_FILETIME)
-            f = createfile()
-            f.write(u.read())
-            u.close()
-            f.close()
-            os.utime(f.name, (mtime, mtime))
-            cache[key] = CacheEntry(f.name, key)
+
+        key = Key(path, os.path.normpath('/'.join(name)))
+        if key not in self.cache:
+            u = self.loader[path].urlopen(key.name)
+            if u.scheme == 'file':
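+                # A local file is used in place; nothing is copied.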
+                p = urlunquote(urlsplit(u.geturl()).path)
+                self.cache[key] = CacheEntry(p, key)
+                pass
+            else:
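+                # Download to a temp file and preserve the server's mtime.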
+                mtime = u.curl_obj.getinfo(pycurl.INFO_FILETIME)
+                f = self.createfile()
+                f.write(u.read())
+                u.close()
+                f.close()
+                os.utime(f.name, (mtime, mtime))
+                self.cache[key] = CacheEntry(f.name, key)
+                pass
             pass
-        pass
-    return cache[key]
+        return self.cache[key]
+
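+    # Describe each repository path as an Info record carrying a short
+    # unique id and its mirror URLs.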
+    def expand_mirror(self, path):
+        def unique_id(path):
+            paths = set(path)
+            N = 0
+            while True:
+                N = N + 1
+                result = {}
+                test = set()
+                for p in paths:
+                    name = '__'.join(p.split('/')[-N:])
+                    if name in test:
+                        # Try a longer id to make it unique
+                        break
+                    test.add(name)
+                    result[p] = name
+                    pass
+                if len(test) == len(paths):
+                    break
+                pass
+            return result
+
+        class Info:
+            def __init__(self, id, path, urls):
+                self.id = id
+                self.path = path
+                self.urls = urls
+                pass
+
+            def __repr__(self):
+                return 'Repo(id=%s, path=%s, urls=%s)' % (
+                    self.id, self.path, self.urls)
+            pass
+
+        result = []
+        id = unique_id(path)
+        for p in path:
+            prefix = self.loader[p].prefix
+            urls = ["%s%s" % (u, prefix) for u in self.loader[p].mirror.urls]
+            result.append(Info(id[p], p, urls))
+            pass
+        return result
+
+    pass
+
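+# Module-level singleton preserving the original function-based API.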
+_cache = FileCache()
+loadfile = _cache.loadfile
+createfile = _cache.createfile
+expand_mirror = _cache.expand_mirror
+
+atexit.register(_cache.cleanup)
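+
+# Example (hypothetical URL):
+#   entry = loadfile('http://mirror.example.org/repo', 'base', 'comps.xml')
+#   data = open(entry.filename, 'rb').read()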
 
 def loadscript(path, *name):
     result = loadfile(path, *name)
@@ -210,4 +252,3 @@ def loadscript(path, *name):
 def localpath(prefix, *path):
     return os.path.normpath('/'.join(filter(None, [prefix] + list(path))))
 
-atexit.register(cleanup)