[Darwinfiles-cvs] portage/pym portage.py,1.3,1.4 portage_core.py,1.1.1.1,1.2 portage_core2.py,1.1.1.
From: <ji...@us...> - 2002-09-08 16:20:16
Update of /cvsroot/darwinfiles/portage/pym
In directory usw-pr-cvs1:/tmp/cvs-serv31897
Modified Files:
portage.py portage_core.py portage_core2.py portagedb.py
Log Message:
Merged with portage-2.0.36
Index: portage.py
===================================================================
RCS file: /cvsroot/darwinfiles/portage/pym/portage.py,v
retrieving revision 1.3
retrieving revision 1.4
diff -C2 -d -r1.3 -r1.4
*** portage.py 14 Aug 2002 02:54:57 -0000 1.3
--- portage.py 8 Sep 2002 16:20:08 -0000 1.4
***************
*** 3,7 ****
# Distributed under the GNU Public License v2
! VERSION="2.0.27"
from stat import *
--- 3,7 ----
# Distributed under the GNU Public License v2
! VERSION="2.0.36"
from stat import *
***************
*** 10,15 ****
import string,os,types,sys,shlex,shutil,xpak,fcntl,signal,time,cPickle,atexit,grp
- ## Note: missingos removed until it will compile and work :(
-
#Secpass will be set to 1 if the user is root or in the wheel group.
uid=os.getuid()
--- 10,13 ----
***************
*** 75,87 ****
starttime=int(time.time())
- #defined in doebuild as global
- #dont set this to [], as it then gets seen as a list variable
- #which gives tracebacks (usually if ctrl-c is hit very early)
- buildphase=""
-
- #the build phases for which sandbox should be active
- sandboxactive=["unpack","compile","clean","install","help","setup"]
- #if the exithandler triggers before features has been initialized, then it's safe to assume
- #that the sandbox isn't active.
features=[]
--- 73,76 ----
***************
*** 89,95 ****
def exithandler(foo,bar):
global features,secpass
! print "!!! Portage interrupted by SIGINT; exiting."
! #disable sandboxing to prevent problems
! #only do this if sandbox is in $FEATURES and we are root.
if (secpass==2) and ("sandbox" in features):
mypid=os.fork()
--- 78,82 ----
def exithandler(foo,bar):
global features,secpass
! #remove temp sandbox files
if (secpass==2) and ("sandbox" in features):
mypid=os.fork()
***************
*** 97,107 ****
myargs=[]
mycommand="/usr/lib/portage/bin/testsandbox.sh"
! #if we are in the unpack,compile,clean or install phases,
! #there will already be one sandbox running for this call
! #to emerge
! if buildphase in sandboxactive:
! myargs=["testsandbox.sh","1"]
! else:
! myargs=["testsandbox.sh","0"]
myenv={}
os.execve(mycommand,myargs,myenv)
--- 84,88 ----
myargs=[]
mycommand="/usr/lib/portage/bin/testsandbox.sh"
! myargs=["testsandbox.sh","0"]
myenv={}
os.execve(mycommand,myargs,myenv)
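The exit handler above launches testsandbox.sh with the usual fork/execve/waitpid sequence: the child replaces itself with the helper script and the parent blocks until it finishes, then inspects the wait status. A minimal standalone sketch of that pattern in Python, with an illustrative helper path rather than the real one:

import os

def run_helper(command, args, env=None):
    # Fork; the child execs the helper, the parent waits for it to finish.
    pid = os.fork()
    if pid == 0:
        # Child: replace this process image with the helper.
        # os.execve only returns if it failed.
        os.execve(command, args, env or {})
        os._exit(1)
    # Parent: collect the raw wait status (as os.waitpid returns it).
    return os.waitpid(pid, 0)[1]

# e.g. status = run_helper("/bin/sh", ["sh", "-c", "exit 0"])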
***************
*** 109,117 ****
sys.exit(1)
retval=os.waitpid(mypid,0)[1]
! print "PORTAGE: Checking for Sandbox ("+buildphase+")..."
! if retval==0:
! print "PORTAGE: No Sandbox running, deleting /etc/ld.so.preload!"
! if os.path.exists("/etc/ld.so.preload"):
! os.unlink("/etc/ld.so.preload")
# 0=send to *everybody* in process group
os.kill(0,signal.SIGKILL)
--- 90,96 ----
sys.exit(1)
retval=os.waitpid(mypid,0)[1]
! # if retval==0:
! # if os.path.exists("/tmp/sandboxpids.tmp"):
! # os.unlink("/tmp/sandboxpids.tmp")
# 0=send to *everybody* in process group
os.kill(0,signal.SIGKILL)
***************
*** 283,287 ****
pos=pos+1
! specials={"KDEDIRS":[],"PATH":[],"CLASSPATH":[],"LDPATH":[],"MANPATH":[],"INFODIR":[],"ROOTPATH":[]}
env={}
--- 262,266 ----
pos=pos+1
! specials={"KDEDIRS":[],"PATH":[],"CLASSPATH":[],"LDPATH":[],"MANPATH":[],"INFODIR":[],"ROOTPATH":[],"CONFIG_PROTECT":[],"CONFIG_PROTECT_MASK":[]}
env={}
***************
*** 342,351 ****
continue
outstring="export "+path+"='"
! for x in specials[path][:-1]:
! outstring=outstring+x+":"
outstring=outstring+specials[path][-1]+"'"
outfile.write(outstring+"\n")
- #get it out of the way
- # del specials[path]
#create /etc/profile.env
--- 321,332 ----
continue
outstring="export "+path+"='"
! if path in ["CONFIG_PROTECT","CONFIG_PROTECT_MASK"]:
! for x in specials[path][:-1]:
! outstring += x+" "
! else:
! for x in specials[path][:-1]:
! outstring=outstring+x+":"
outstring=outstring+specials[path][-1]+"'"
outfile.write(outstring+"\n")
#create /etc/profile.env
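The profile.env hunk above changes how the collected variables are joined: CONFIG_PROTECT and CONFIG_PROTECT_MASK are written as space-separated path lists, while PATH-style variables remain colon-separated. A minimal sketch of that joining rule (function and variable names are illustrative, not from the diff):

def join_special(name, values):
    # CONFIG_PROTECT and CONFIG_PROTECT_MASK hold space-separated path
    # lists; everything else (PATH, MANPATH, LDPATH, ...) uses colons.
    if name in ("CONFIG_PROTECT", "CONFIG_PROTECT_MASK"):
        sep = " "
    else:
        sep = ":"
    return "export " + name + "='" + sep.join(values) + "'"

# join_special("PATH", ["/usr/bin", "/bin"])
#   -> export PATH='/usr/bin:/bin'
# join_special("CONFIG_PROTECT", ["/etc", "/usr/share/config"])
#   -> export CONFIG_PROTECT='/etc /usr/share/config'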
***************
*** 797,802 ****
to work."""
! # we use the pid of the python instance to create a temp file
! # spawnebuild.sh and ebuild.sh uses to save/restore the environment.
settings["PORTAGE_MASTER_PID"]=`os.getpid()`
--- 778,782 ----
to work."""
! # useful if an ebuild or the like needs to get the pid of our python process
settings["PORTAGE_MASTER_PID"]=`os.getpid()`
***************
*** 805,821 ****
myargs=[]
if ("sandbox" in features) and (not free):
! #only run sandbox for the following phases
! if buildphase in sandboxactive:
! mycommand="/usr/lib/portage/bin/sandbox"
! if debug:
! myargs=["sandbox",mystring]
! else:
! myargs=["sandbox",mystring]
! else:
! mycommand="/bin/bash"
! if debug:
! myargs=["bash","-x","-c",mystring]
! else:
! myargs=["bash","-c",mystring]
else:
mycommand="/bin/bash"
--- 785,790 ----
myargs=[]
if ("sandbox" in features) and (not free):
! mycommand="/usr/lib/portage/bin/sandbox"
! myargs=["sandbox",mystring]
else:
mycommand="/bin/bash"
***************
*** 839,868 ****
return 16
- def ebuildsh(mystring,debug=0):
- "Spawn ebuild.sh, optionally in a sandbox"
-
- mylist=mystring.split()
- for x in mylist:
- global buildphase
- buildphase=x
-
- #here we always want to call spawn with free=0,
- #else the exit handler may not detect things properly
- retval=spawn("/usr/sbin/ebuild.sh "+x,debug)
-
- #reset it again
- buildphase=""
-
- #it is a batch call to ebuild.sh, so ebuild.sh should
- #load the saved environment on next call.
- settings["PORTAGE_RESTORE_ENV"]="1"
-
- if retval:
- settings["PORTAGE_RESTORE_ENV"]="0"
- return retval
-
- #reset it again
- settings["PORTAGE_RESTORE_ENV"]="0"
-
def fetch(myuris):
"fetch files. Will use digest file if available."
--- 808,811 ----
***************
*** 1095,1108 ****
settings["BUILD_PREFIX"]=settings["PORTAGE_TMPDIR"]+"/portage"
settings["PKG_TMPDIR"]=settings["PORTAGE_TMPDIR"]+"/portage-pkg"
! if mydo!="depend":
! #depend may be run as non-root
! settings["BUILDDIR"]=settings["BUILD_PREFIX"]+"/"+settings["PF"]
! if not os.path.exists(settings["BUILDDIR"]):
! os.makedirs(settings["BUILDDIR"])
! settings["T"]=settings["BUILDDIR"]+"/temp"
! if not os.path.exists(settings["T"]):
! os.makedirs(settings["T"])
! settings["WORKDIR"]=settings["BUILDDIR"]+"/work"
! settings["D"]=settings["BUILDDIR"]+"/image/"
if mydo=="unmerge":
--- 1038,1051 ----
settings["BUILD_PREFIX"]=settings["PORTAGE_TMPDIR"]+"/portage"
settings["PKG_TMPDIR"]=settings["PORTAGE_TMPDIR"]+"/portage-pkg"
! #depend may be run as non-root
! settings["BUILDDIR"]=settings["BUILD_PREFIX"]+"/"+settings["PF"]
! if not os.path.exists(settings["BUILDDIR"]) and mydo!="depend":
! os.makedirs(settings["BUILDDIR"])
! # Should be ok again to set $T, as sandbox do not depend on it
! settings["T"]=settings["BUILDDIR"]+"/temp"
! if not os.path.exists(settings["T"]) and mydo!="depend":
! os.makedirs(settings["T"])
! settings["WORKDIR"]=settings["BUILDDIR"]+"/work"
! settings["D"]=settings["BUILDDIR"]+"/image/"
if mydo=="unmerge":
***************
*** 1119,1129 ****
settings["KV"]=mykv
! # if any of these are being called, handle them and stop now.
if mydo in ["help","clean","setup","prerm","postrm","preinst","postinst","config"]:
! return ebuildsh(mydo,debug)
# get possible slot information from the deps file
if mydo=="depend":
myso=getstatusoutput("/usr/sbin/ebuild.sh depend")
return myso[0]
try:
--- 1062,1074 ----
settings["KV"]=mykv
! # if any of these are being called, handle them -- running them out of the sandbox -- and stop now.
if mydo in ["help","clean","setup","prerm","postrm","preinst","postinst","config"]:
! return spawn("/usr/sbin/ebuild.sh "+mydo,debug,free=1)
# get possible slot information from the deps file
if mydo=="depend":
myso=getstatusoutput("/usr/sbin/ebuild.sh depend")
+ if debug:
+ print myso[1]
return myso[0]
try:
***************
*** 1181,1190 ****
}
if mydo in actionmap.keys():
! return ebuildsh(actionmap[mydo],debug)
elif mydo=="qmerge":
#qmerge is specifically not supposed to do a runtime dep check
return merge(settings["CATEGORY"],settings["PF"],settings["D"],settings["BUILDDIR"]+"/build-info",myroot)
elif mydo=="merge":
! retval=ebuildsh("setup unpack compile install")
if retval: return retval
return merge(settings["CATEGORY"],settings["PF"],settings["D"],settings["BUILDDIR"]+"/build-info",myroot,myebuild=settings["EBUILD"])
--- 1126,1135 ----
}
if mydo in actionmap.keys():
! return spawn("/usr/sbin/ebuild.sh "+actionmap[mydo],debug)
elif mydo=="qmerge":
#qmerge is specifically not supposed to do a runtime dep check
return merge(settings["CATEGORY"],settings["PF"],settings["D"],settings["BUILDDIR"]+"/build-info",myroot)
elif mydo=="merge":
! retval=spawn("/usr/sbin/ebuild.sh setup unpack compile install")
if retval: return retval
return merge(settings["CATEGORY"],settings["PF"],settings["D"],settings["BUILDDIR"]+"/build-info",myroot,myebuild=settings["EBUILD"])
***************
*** 1211,1215 ****
return 0
else:
! return ebuildsh("setup unpack compile install package")
expandcache={}
--- 1156,1160 ----
return 0
else:
! return spawn("/usr/sbin/ebuild.sh setup unpack compile install package")
expandcache={}
***************
*** 1325,1338 ****
destorig=None
#copy destnew file into place
if sstat[ST_DEV]==dstat[ST_DEV]:
! #on the same fs
try:
os.rename(src,destnew)
except:
! print "!!! rename fail 1 on",src,"->",destnew
! if destorig:
! os.unlink(destorig)
! return None
! else:
#not on same fs
try:
--- 1270,1284 ----
destorig=None
#copy destnew file into place
+ trycopy=1
if sstat[ST_DEV]==dstat[ST_DEV]:
! #on the same fs; note that a bind mount of the same filesystem will show up
! #as the same filesystem, but os.rename won't work, so we need to detect this and
! #fall back to copy, below...
try:
os.rename(src,destnew)
+ trycopy=0
except:
! pass
! if trycopy:
#not on same fs
try:
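The movefile hunk above stops treating a failed os.rename() as fatal: as the new comment notes, a bind mount of the same filesystem reports the same device number, yet rename() across it still fails, so the code now falls through to the copy path instead of aborting. A minimal standalone sketch of that rename-with-copy-fallback idea (the real movefile() also handles symlinks, permissions and md5 checks; names here are illustrative):

import os, shutil

def move_into_place(src, dest):
    # Prefer an atomic rename; if the kernel refuses (different
    # filesystem, or a bind mount masquerading as the same one),
    # fall back to copying the data and removing the source.
    try:
        os.rename(src, dest)
        return
    except OSError:
        pass
    shutil.copy2(src, dest)  # copies contents and mtime/permission bits
    os.unlink(src)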
***************
*** 1988,1995 ****
myslash=mycpv.split("/")
mysplit=pkgsplit(myslash[-1])
! if len(myslash)==2:
return myslash[0]+"/"+mysplit[0]
! else:
return mysplit[0]
def key_expand(mykey,mydb=None):
--- 1934,1944 ----
myslash=mycpv.split("/")
mysplit=pkgsplit(myslash[-1])
! mylen=len(myslash)
! if mylen==2:
return myslash[0]+"/"+mysplit[0]
! elif mylen==1:
return mysplit[0]
+ else:
+ return mysplit
def key_expand(mykey,mydb=None):
***************
*** 2027,2030 ****
--- 1976,1980 ----
myp=mysplit[0]
else:
+ # "foo" ?
myp=mycpv
mykey=None
***************
*** 2069,2073 ****
return origdep
! def dep_expand(mydep,mydb):
if not len(mydep):
return mydep
--- 2019,2023 ----
return origdep
! def dep_expand(mydep,mydb=None):
if not len(mydep):
return mydep
***************
*** 2119,2123 ****
return [1,[]]
else:
! mylist=dep_listcleanup(dep_zapdeps(mysplit,mysplit2))
mydict={}
for x in mylist:
--- 2069,2074 ----
return [1,[]]
else:
! mylist=flatten(dep_listcleanup(dep_zapdeps(mysplit,mysplit2)))
! #remove duplicates
mydict={}
for x in mylist:
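The dep-resolution hunk above now runs the cleaned-up dependency list through flatten() before de-duplicating it with a dictionary. flatten() itself is not shown in this diff; a minimal sketch of the presumed idea, recursive flattening followed by order-preserving de-duplication (names illustrative):

def flatten(mylist):
    # Collapse arbitrarily nested lists into one flat list.
    out = []
    for x in mylist:
        if isinstance(x, list):
            out.extend(flatten(x))
        else:
            out.append(x)
    return out

def unique(mylist):
    # Order-preserving duplicate removal, dict-based as in the hunk above.
    seen = {}
    out = []
    for x in mylist:
        if x not in seen:
            seen[x] = 1
            out.append(x)
    return out

# unique(flatten([["a", ["b", "a"]], "c"]))  ->  ["a", "b", "c"]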
***************
*** 2421,2431 ****
def counter_tick_core(myroot):
"This method will grab the next COUNTER value and record it back to the global file. Returns new counter value."
! edbpath=myroot+"var/cache/edb/"
! cpath=edbpath+"counter"
#We write our new counter value to a new file that gets moved into
#place to avoid filesystem corruption on XFS (unexpected reboot.)
-
- newcpath=edbpath+"counter.new"
if os.path.exists(cpath):
cfile=open(cpath, "r")
--- 2372,2379 ----
def counter_tick_core(myroot):
"This method will grab the next COUNTER value and record it back to the global file. Returns new counter value."
! cpath=myroot+"var/cache/edb/counter"
#We write our new counter value to a new file that gets moved into
#place to avoid filesystem corruption on XFS (unexpected reboot.)
if os.path.exists(cpath):
cfile=open(cpath, "r")
***************
*** 2433,2438 ****
counter=long(cfile.readline())
except ValueError:
! print "portage: COUNTER was corrupted; resetting to value of 9999"
! counter=counter=long(9999)
cfile.close()
else:
--- 2381,2386 ----
counter=long(cfile.readline())
except ValueError:
! counter=long(time.time())
! print "portage: COUNTER was corrupted; resetting to value of",counter
cfile.close()
else:
***************
*** 2441,2444 ****
--- 2389,2393 ----
counter += 1
# update new global counter file
+ newcpath=cpath+".new"
newcfile=open(newcpath,"w")
newcfile.write(str(counter))
***************
*** 2447,2451 ****
os.rename(newcpath,cpath)
return counter
-
cptot=0
--- 2396,2399 ----
***************
*** 2467,2470 ****
--- 2415,2446 ----
return counter_tick_core(self.root)
+ def cpv_counter(self,mycpv):
+ "This method will grab the next COUNTER value and record it back to the global file. Returns new counter value."
+ cpath=self.root+"var/db/pkg/"+mycpv+"/COUNTER"
+
+ #We write our new counter value to a new file that gets moved into
+ #place to avoid filesystem corruption on XFS (unexpected reboot.)
+ corrupted=0
+ if os.path.exists(cpath):
+ cfile=open(cpath, "r")
+ try:
+ counter=long(cfile.readline())
+ except ValueError:
+ print "portage: COUNTER for",mycpv,"was corrupted; resetting to value of 0"
+ counter=long(0)
+ corrupted=1
+ cfile.close()
+ else:
+ counter=long(0)
+ if corrupted:
+ newcpath=cpath+".new"
+ # update new global counter file
+ newcfile=open(newcpath,"w")
+ newcfile.write(str(counter))
+ newcfile.close()
+ # now move global counter file into place
+ os.rename(newcpath,cpath)
+ return counter
+
def cpv_inject(self,mycpv):
"injects a real package into our on-disk database; assumes mycpv is valid and doesn't already exist"
***************
*** 2485,2489 ****
mycpsplit=catpkgsplit(mycpv)
mynewcpv=newcp+"-"+mycpsplit[2]
! mynewcat=mycpsplit[0]
if mycpsplit[3]!="r0":
mynewcpv += "-"+mycpsplit[3]
--- 2461,2465 ----
mycpsplit=catpkgsplit(mycpv)
mynewcpv=newcp+"-"+mycpsplit[2]
! mynewcat=newcp.split("/")[0]
if mycpsplit[3]!="r0":
mynewcpv += "-"+mycpsplit[3]
***************
*** 2493,2502 ****
if not os.path.exists(self.root+"var/db/pkg/"+mynewcat):
#create the directory
! os.makedirs(self.root+"var/db/pkg"+mynewcat)
newpath=self.root+"var/db/pkg/"+mynewcpv
if os.path.exists(newpath):
#dest already exists; keep this puppy where it is.
continue
- print "portage: vardbapi: moving",mycpv,"to",mynewcpv
os.system("/bin/mv "+origpath+" "+newpath)
--- 2469,2477 ----
if not os.path.exists(self.root+"var/db/pkg/"+mynewcat):
#create the directory
! os.makedirs(self.root+"var/db/pkg/"+mynewcat)
newpath=self.root+"var/db/pkg/"+mynewcpv
if os.path.exists(newpath):
#dest already exists; keep this puppy where it is.
continue
os.system("/bin/mv "+origpath+" "+newpath)
***************
*** 2679,2700 ****
return 0
- def gettimeval(self,mycatpkg):
- """Get an integer time value that can be used to compare against other catpkgs; the timeval will try to use
- COUNTER but will also take into account the start time of Portage and use mtimes of CONTENTS files if COUNTER
- doesn't exist. The algorithm makes it safe to compare the timeval values of COUNTER-enabled and non-COUNTER
- db entries. Assumes mycatpkg exists."""
- global starttime
- rootp=self.root+"var/db/pkg/"+mycatpkg
- if not os.path.exists(rootp+"/COUNTER"):
- if not os.path.exists(rootp+"/CONTENTS"):
- return 0
- else:
- return os.stat(rootp+"/CONTENTS")[ST_MTIME]
- else:
- mycounterfile=open(rootp+"/COUNTER","r")
- mycountervar=string.atoi(string.split(mycounterfile.readline())[0])
- mycounterfile.close()
- return starttime+mycountervar
-
def populate(self):
self.populated=1
--- 2654,2657 ----
***************
*** 2741,2744 ****
--- 2698,2705 ----
mydbkey=dbcachedir+mycpv
mycsplit=catpkgsplit(mycpv)
+ if not mycsplit:
+ #invalid cpv specified
+ print "portage: aux_get():",mycpv,"is not a valid cat/pkg-v string."
+ raise KeyError
mysplit=mycpv.split("/")
myebuild=self.findname(mycpv)
***************
*** 2843,2849 ****
#due to a stale or regenerated cache entry, we need to update our internal dictionary....
self.auxcache[mycpv]={"mtime":dmtime}
! for x in range(0,len(auxdbkeys)):
! self.auxcache[mycpv][auxdbkeys[x]]=mylines[x][:-1]
!
#finally, we look at our internal cache entry and return the requested data.
returnme=[]
--- 2804,2813 ----
#due to a stale or regenerated cache entry, we need to update our internal dictionary....
self.auxcache[mycpv]={"mtime":dmtime}
! try:
! for x in range(0,len(auxdbkeys)):
! self.auxcache[mycpv][auxdbkeys[x]]=mylines[x][:-1]
! except IndexError:
! print "portage: aux_get(): error processing",auxdbkeys[x],"for",mycpv+"; exiting."
! sys.exit(1)
#finally, we look at our internal cache entry and return the requested data.
returnme=[]
***************
*** 2859,2862 ****
--- 2823,2829 ----
cps2=mykey.split("/")
cps=catpkgsplit(mykey,0)
+ if not cps:
+ #invalid cat/pkg-v
+ return 0
if self.oroot:
if os.path.exists(self.oroot+"/"+cps[0]+"/"+cps[1]+"/"+cps2[1]+".ebuild") or os.path.exists(self.oroot+"/"+cps[0]+"/"+cps[1]+"/"+cps2[1]+".ebuild"):
***************
*** 2872,2875 ****
--- 2839,2844 ----
try:
for y in listdir(self.root+"/"+x):
+ if y=="CVS":
+ continue
biglist.append(x+"/"+y)
except:
***************
*** 2879,2882 ****
--- 2848,2853 ----
try:
for y in listdir(self.oroot+"/"+x):
+ if y=="CVS":
+ continue
mykey=x+"/"+y
if not mykey in biglist:
***************
*** 2991,2994 ****
--- 2962,2969 ----
mykey=newlist[0]
cpv=catpkgsplit(mykey)
+ if not cpv:
+ #invalid cat/pkg-v
+ print "portage: visible():",cpv,"is an invalid cat/pkg-v string. Aborting processing."
+ return []
mycp=cpv[0]+"/"+cpv[1]
if maskdict.has_key(mycp):
***************
*** 3209,3220 ****
for x in mystuff:
if x[-7:]==".ebuild":
! myebuildpath=self.dbdir+"/"+x+".ebuild"
break
#do prerm script
if myebuildpath and os.path.exists(myebuildpath):
a=doebuild(myebuildpath,"prerm",self.myroot)
- if a:
- print "!!! pkg_prerm() script failed; exiting."
- sys.exit(a)
mykeys=pkgfiles.keys()
--- 3184,3192 ----
for x in mystuff:
if x[-7:]==".ebuild":
! myebuildpath=self.dbdir+"/"+x
break
#do prerm script
if myebuildpath and os.path.exists(myebuildpath):
a=doebuild(myebuildpath,"prerm",self.myroot)
mykeys=pkgfiles.keys()
***************
*** 3251,3257 ****
lmtime=`lstatobj[ST_MTIME]`
#next line: we dont rely on mtimes for symlinks anymore.
! if (pkgfiles[obj][0] not in ("dir","fif","dev","sym")) and (lmtime != pkgfiles[obj][1]):
! print "--- !mtime", pkgfiles[obj][0], obj
! continue
if pkgfiles[obj][0]=="dir":
if not os.path.isdir(obj):
--- 3223,3232 ----
lmtime=`lstatobj[ST_MTIME]`
#next line: we dont rely on mtimes for symlinks anymore.
! try:
! if (pkgfiles[obj][0] not in ("dir","fif","dev","sym")) and (lmtime != pkgfiles[obj][1]):
! print "--- !mtime", pkgfiles[obj][0], obj
! continue
! except KeyError:
! print "--- !error",pkgfiles[obj][0],obj
if pkgfiles[obj][0]=="dir":
if not os.path.isdir(obj):
***************
*** 3462,3468 ****
if myebuildpath and os.path.exists(myebuildpath):
a=doebuild(myebuildpath,"postrm",self.myroot)
- if a:
- print "!!! pkg_postrm() script failed; exiting."
- sys.exit(a)
def treewalk(self,srcroot,destroot,inforoot,myebuild):
--- 3437,3440 ----
***************
*** 3492,3498 ****
else:
a=doebuild(inforoot+"/"+self.pkg+".ebuild","preinst",root)
- if a:
- print "!!! pkg_preinst() script failed; exiting."
- sys.exit(a)
# open CONTENTS file (possibly overwriting old one) for recording
outfile=open(inforoot+"/CONTENTS","w")
--- 3464,3467 ----
***************
*** 3563,3566 ****
--- 3532,3538 ----
#convert a specific virtual like dev-lang/python-2.2 to dev-lang/python
mysplit=catpkgsplit(mycatpkg)
+ if not mysplit:
+ print "portage: treewalk():",mycatpkg,"is an invalid PROVIDE entry; skipping."
+ continue
mycatpkg=mysplit[0]+"/"+mysplit[1]
if myvirts.has_key(mycatpkg):
***************
*** 3579,3585 ****
else:
a=doebuild(inforoot+"/"+self.pkg+".ebuild","postinst",root)
- if a:
- print "!!! pkg_postinst() script failed; exiting."
- sys.exit(a)
#update environment settings, library paths
env_update()
--- 3551,3554 ----
***************
*** 3695,3698 ****
--- 3664,3668 ----
print "!!!",mydest
elif S_ISREG(mydmode):
+ cfgprot=0
# install of destination is blocked by an existing regular file; now, config file
# management may come into play.
***************
*** 3701,3766 ****
# we have a protection path; enable config file management.
destmd5=perform_md5(mydest)
if cfgfiledict.has_key(myrealdest):
- #this file has been merged in the past, either as the original file or as a ._cfg extension of original.
- #we can skip the merging of this file. But we need to do one thing first, called "cycling". Let's say that
- #since the last merge on this file, the user has copied /etc/._cfg0000_foo to /etc/foo. The ._cfg had
- #position 4 in our md5 list (in cfgfiledict). Now that the file has been moved into place, we want to
- #*throw away* md5s 0-3. Reasoning? By doing this, we discard expired md5sums, and also allow a *new*
- #package to merge a "classic" version of the file (consider if the new version was buggy, so we reverted
- #to the original... without this important code, the new "original" would not get merged since it had
- #been merged before.
if destmd5 in cfgfiledict[myrealdest]:
! cfgfiledict[myrealdest]=cfgfiledict[myrealdest][cfgfiledict[myrealdest].index(destmd5):]
if mymd5==destmd5:
! #file already in place, so no need to merge this file. However, we need to update the
! #target file's times:
os.utime(mydest,(thismtime,thismtime))
zing="---"
moveme=0
elif cfgfiledict.has_key(myrealdest) and (mymd5 in cfgfiledict[myrealdest]):
! #ok, now that we've cycled cfgfiledict (see big paragraph above), it's safe to simply not merge this file
! #if it has been merged by us in the past. Thanks to the cycling, we can be do this with some assurance
! #that we are not being overly zealous in our desire to avoid merging files unnecessarily.
! zing="---"
moveme=0
else:
! #don't overwrite --
! # the files are not identical (from an md5 perspective); we cannot simply overwrite.
! pnum=-1
! # set pmatch to the literal filename only
! pmatch=os.path.basename(mydest)
! # config protection filename format:
! # ._cfg0000_foo
! # positioning (for reference):
! # 0123456789012
! mypfile=""
! for pfile in listdir(mydestdir):
! if pfile[0:5]!="._cfg":
! continue
! if pfile[10:]!=pmatch:
! continue
! try:
! newpnum=string.atoi(pfile[5:9])
! if newpnum>pnum:
! pnum=newpnum
! mypfile=pfile
! except:
! continue
! pnum=pnum+1
! # mypfile is set to the name of the most recent cfg management file currently on disk.
! # if their md5sums match, we overwrite the mypfile rather than creating a new .cfg file.
! # this keeps on-disk cfg management clutter to a minimum.
! cleanup=0
! if mypfile:
! pmd5=perform_md5(mydestdir+"/"+mypfile)
! if mymd5==pmd5:
! mydest=(mydestdir+"/"+mypfile)
! cleanup=1
! if not cleanup:
! # md5sums didn't match, so we create a new filename for merging.
! # we now have pnum set to the official 4-digit config that should be used for the file
! # we need to install. Set mydest to this new value.
! mydest=os.path.normpath(mydestdir+"/._cfg"+string.zfill(pnum,4)+"_"+pmatch)
! #add to our md5 list for future reference (will get written to /var/cache/edb/config)
if not cfgfiledict.has_key(myrealdest):
cfgfiledict[myrealdest]=[]
--- 3671,3698 ----
# we have a protection path; enable config file management.
destmd5=perform_md5(mydest)
+ cycled=0
if cfgfiledict.has_key(myrealdest):
if destmd5 in cfgfiledict[myrealdest]:
! #cycle
! del cfgfiledict[myrealdest]
! cycled=1
if mymd5==destmd5:
! #file already in place; simply update mtimes of destination
os.utime(mydest,(thismtime,thismtime))
zing="---"
moveme=0
+ elif cycled:
+ #mymd5!=destmd5 and we've cycled; move mysrc into place as a ._cfg file
+ moveme=1
+ cfgfiledict[myrealdest]=[mymd5]
+ cfgprot=1
elif cfgfiledict.has_key(myrealdest) and (mymd5 in cfgfiledict[myrealdest]):
! #myd5!=destmd5, we haven't cycled, and the file we're merging has been already merged previously
! zing="-o-"
moveme=0
else:
! #mymd5!=destmd5, we haven't cycled, and the file we're merging hasn't been merged before
! moveme=1
! cfgprot=1
if not cfgfiledict.has_key(myrealdest):
cfgfiledict[myrealdest]=[]
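The rewritten block above replaces the long cycling commentary with a compact decision based on three md5 values: the file being merged (mymd5), the file already on disk (destmd5), and the per-file history of md5s merged in the past (cfgfiledict). A simplified sketch of that decision, returning whether to move the file and whether to write it as a protected ._cfg copy; the real code also stores the history back into cfgfiledict and caps its length, and the names below are illustrative:

def cfg_decision(mymd5, destmd5, history):
    # history: md5s previously merged for this CONFIG_PROTECTed file.
    cycled = 0
    if destmd5 in history:
        # The on-disk file matches something merged earlier, so the user
        # has accepted it; discard the stale history ("cycling").
        history = []
        cycled = 1
    if mymd5 == destmd5:
        return 0, 0, history            # identical: nothing to merge
    elif cycled:
        return 1, 1, [mymd5]            # changed after a cycle: protect it
    elif mymd5 in history:
        return 0, 0, history            # this version was offered before
    else:
        return 1, 1, history + [mymd5]  # new content: merge as ._cfg copy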
***************
*** 3770,3773 ****
--- 3702,3744 ----
if len(cfgfiledict[myrealdest])>16:
del cfgfiledict[myrealdest][0]
+
+ if cfgprot:
+ pnum=-1
+ # set pmatch to the literal filename only
+ pmatch=os.path.basename(mydest)
+ # config protection filename format:
+ # ._cfg0000_foo
+ # positioning (for reference):
+ # 0123456789012
+ mypfile=""
+ for pfile in listdir(mydestdir):
+ if pfile[0:5]!="._cfg":
+ continue
+ if pfile[10:]!=pmatch:
+ continue
+ try:
+ newpnum=string.atoi(pfile[5:9])
+ if newpnum>pnum:
+ pnum=newpnum
+ mypfile=pfile
+ except:
+ continue
+ pnum=pnum+1
+ # mypfile is set to the name of the most recent cfg management file currently on disk.
+ # if their md5sums match, we overwrite the mypfile rather than creating a new .cfg file.
+ # this keeps on-disk cfg management clutter to a minimum.
+ cleanup=0
+ if mypfile:
+ pmd5=perform_md5(mydestdir+"/"+mypfile)
+ if mymd5==pmd5:
+ mydest=(mydestdir+"/"+mypfile)
+ cleanup=1
+ if not cleanup:
+ # md5sums didn't match, so we create a new filename for merging.
+ # we now have pnum set to the official 4-digit config that should be used for the file
+ # we need to install. Set mydest to this new value.
+ mydest=os.path.normpath(mydestdir+"/._cfg"+string.zfill(pnum,4)+"_"+pmatch)
+ #add to our md5 list for future reference (will get written to /var/cache/edb/config)
+
# whether config protection or not, we merge the new file the same way. Unless moveme=0 (blocking directory)
if moveme:
***************
*** 3928,3933 ****
if not os.path.exists(root+"var/tmp"):
print ">>> "+root+"var/tmp doesn't exist, creating it..."
! os.mkdir(root+"var",0755)
! os.mkdir(root+"var/tmp",01777)
cachedirs=["/var/cache/edb"]
--- 3899,3911 ----
if not os.path.exists(root+"var/tmp"):
print ">>> "+root+"var/tmp doesn't exist, creating it..."
! try:
! os.mkdir(root+"var",0755)
! except (OSError,IOError):
! pass
! try:
! os.mkdir(root+"var/tmp",01777)
! except:
! print "portage: couldn't create /var/tmp; exiting."
! sys.exit(1)
cachedirs=["/var/cache/edb"]
***************
*** 3972,3976 ****
if not virts_p.has_key(vkeysplit[1]):
virts_p[vkeysplit[1]]=virts[x]
! del x
db["/"]={"virtuals":virts,"vartree":vartree("/",virts)}
if root!="/":
--- 3950,3954 ----
if not virts_p.has_key(vkeysplit[1]):
virts_p[vkeysplit[1]]=virts[x]
! del x
db["/"]={"virtuals":virts,"vartree":vartree("/",virts)}
if root!="/":
Index: portage_core.py
===================================================================
RCS file: /cvsroot/darwinfiles/portage/pym/portage_core.py,v
retrieving revision 1.1.1.1
retrieving revision 1.2
diff -C2 -d -r1.1.1.1 -r1.2
Index: portage_core2.py
===================================================================
RCS file: /cvsroot/darwinfiles/portage/pym/portage_core2.py,v
retrieving revision 1.1.1.1
retrieving revision 1.2
diff -C2 -d -r1.1.1.1 -r1.2
Index: portagedb.py
===================================================================
RCS file: /cvsroot/darwinfiles/portage/pym/portagedb.py,v
retrieving revision 1.1.1.1
retrieving revision 1.2
diff -C2 -d -r1.1.1.1 -r1.2