/[gentoo-src]/portage/pym/portage.py.ldsopreload
Gentoo

Contents of /portage/pym/portage.py.ldsopreload

Parent Directory Parent Directory | Revision Log Revision Log


Revision 1.2 - (show annotations) (download)
Mon Jun 10 01:01:16 2002 UTC (12 years, 2 months ago) by azarah
Branch: MAIN
CVS Tags: HEAD
Changes since 1.1: +0 -0 lines
FILE REMOVED
ld.so.preload fixes

1 # Gentoo Linux Dependency Checking Code
2 # Copyright 1998-2000 Daniel Robbins, Gentoo Technologies, Inc.
3 # Distributed under the GNU Public License
4
5 # TO-DO:
6 # (I'm adding this here because I lose or forget about all my other Portage
7 # TO-DO files...
8 #
9 # rewrite download system
10 # -----------------------
11 # support partials, look into GENTOO_MIRRORS issue
12 #
13 # subpackages
14 # ===========
15 #src_install will work as normal, and will create the master image that includes
16 #everything in ${D}. There will be a new function, called src_subpkg that contains
17 #instructions for selecting files from ${D} and copying them to subpkg dirs, where
#they will get separately packaged. The function will look something like this:
19 #
20 #src_subpkg() {
21 # subpkg bin
22 # #maybe grab should use regular expressions, not globbing?
23 # grab /usr/bin/* /usr/sbin/* /usr/lib/*.so
24 #
25 # subpkg dev
26 # grab /usr/lib/*.a (any way to say "everything but *.so"?)
27 #}
28 #
29 #Subpackage naming will work as follows. For a package foo-1.0, foo-1.0.tbz2
30 #will be the master package and include all subpackages. foo:dev-1.0.tbz2 will
31 #be the development package, and foo:run-1.0.tbz2 will be a runtime package,
32 #etc. It should be possible to simply treat them as unique package names with
33 #P="foo:dev" and P="foo:run" respectively.
34 #
35 #dep resolution needs to be upgraded a bit, though. "sys-apps/foo" will depend
36 #on the foo master package (i.e. foo-1.0.tbz2) for backwards compatibility. However,
37 #it will now also be possible to depend on "sys-apps/foo:dev" or "sys-apps/foo:run",
38 #and the dep system needs to be upgraded so that it knows how to satisfy these
39 #dependencies. This should allow the new subpackages system to be integrated
40 #seamlessly into our existing dependency hierarchy.
41 #
42 #Note: It may also be a good idea to allow a make.conf option so that "sys-apps/foo:run"
43 #automatically resolves to the master package (for those who prefer complete packages
44 #rather than installing things piecemeal; a great idea for development boxes where many
45 #things will depend on "sys-apps/foo:dev" for headers, but the developer may want the
46 #whole enchilada. (generally, I prefer this approach, though for runtime-only systems
47 #subpackages make a lot of sense).
48
# Portage release version, reported by emerge and written into binary packages
VERSION="1.9.13"
50
51 import string,os
52 from stat import *
53 from commands import *
54 import types
55 import sys
56 import shlex
57 import shutil
58 import xpak
59 import re
60 import fcntl
61 import copy
62 import signal
63 import time
64 import missingos
65
66 try:
67 import fchksum
68 def perform_checksum(filename):
69 return fchksum.fmd5t(filename)
70
71 except ImportError:
72 import md5
73 def md5_to_hex(md5sum):
74 hexform = ""
75 for ix in xrange(len(md5sum)):
76 hexform = hexform + "%02x" % ord(md5sum[ix])
77 return(string.lower(hexform))
78
79 def perform_checksum(filename):
80 f = open(filename, 'rb')
81 blocksize=32768
82 data = f.read(blocksize)
83 size = 0L
84 sum = md5.new()
85 while data:
86 sum.update(data)
87 size = size + len(data)
88 data = f.read(blocksize)
89 return (md5_to_hex(sum.digest()),size)
90
# wall-clock second at which this Portage invocation started
starttime=int(time.time())

# current ebuild phase; set as a global by doebuild()/ebuildsh().  Must be
# initialized to "" rather than [] -- an early ctrl-c (before any phase has
# started) would otherwise traceback in the SIGINT handler.
buildfase=""
97
98 #handle ^C interrupts correctly:
99 def exithandler(signum,frame):
100 print "!!! Portage interrupted by SIGINT; exiting."
101 #disable sandboxing to prevent problems
102 settings=config()
103 features=settings["FEATURES"].split()
104 #only do this if sandbox is in $FEATURES
105 if "sandbox" in features:
106 mypid=[]
107 mypid=os.fork()
108 if mypid==0:
109 myargs=[]
110 mycommand="/usr/lib/portage/bin/testsandbox.sh"
111 #if we are in the unpack,compile,clean or install fases,
112 #there will already be one sandbox running for this call
113 #to emerge
114 if buildfase in ["unpack","compile","clean","install"]:
115 myargs=["testsandbox.sh","1"]
116 else:
117 myargs=["testsandbox.sh","0"]
118 myenv={}
119 os.execve(mycommand,myargs,myenv)
120 os._exit(1)
121 sys.exit(1)
122 retval=os.waitpid(mypid,0)[1]
123 print "PORTAGE: Checking for Sandbox ("+buildfase+")..."
124 if retval==0:
125 print "PORTAGE: No Sandbox running, deleting /etc/ls.so.preload!"
126 if os.path.exists("/etc/ld.so.preload"):
127 os.unlink("/etc/ld.so.preload")
128 # 0=send to *everybody* in process group
129 os.kill(0,signal.SIGKILL)
130 sys.exit(1)
131 signal.signal(signal.SIGINT,exithandler)
132
def tokenize(mystring):
	"""breaks a string like 'foo? (bar) oni? (blah (blah))' into embedded lists; returns None on paren mismatch"""
	#NOTE(review): prevlist only remembers ONE enclosing level, not a stack,
	#so tokens appearing after a doubly-nested close-paren land in the wrong
	#list -- preserved as-is since callers apparently avoid that shape.
	newtokens=[]
	curlist=newtokens
	prevlist=None
	level=0
	accum=""
	for x in mystring:
		if x=="(":
			#flush the accumulated word, then descend into a new sublist
			if accum:
				curlist.append(accum)
				accum=""
			newlist=[]
			curlist.append(newlist)
			prevlist=curlist
			curlist=newlist
			level=level+1
		elif x==")":
			if accum:
				curlist.append(accum)
				accum=""
			curlist=prevlist
			if level==0:
				#close-paren without a matching open
				return None
			level=level-1
		elif x in string.whitespace:
			#word boundary
			if accum:
				curlist.append(accum)
				accum=""
		else:
			accum=accum+x
	if level!=0:
		#unclosed open-paren
		return None
	if accum:
		curlist.append(accum)
	return newtokens
170
def evaluate(mytokens,mydefines,allon=0):
	"""removes tokens based on whether conditional definitions exist or not. Recognizes !

	Mutates mytokens in place and also returns it; returns None if given None.
	A token "flag?" guards the FOLLOWING token: it is kept only when flag is
	in mydefines ("!flag?" inverts the test).  With allon=1 every plain
	conditional is taken as true.
	"""
	if mytokens==None:
		return None
	pos=0
	while pos<len(mytokens):
		if isinstance(mytokens[pos],list):
			#NOTE(review): allon is not propagated into sublists -- preserved
			evaluate(mytokens[pos],mydefines)
			if not len(mytokens[pos]):
				#drop sublists that became empty
				del mytokens[pos]
				continue
		elif mytokens[pos][-1]=="?":
			cur=mytokens[pos][:-1]
			del mytokens[pos]
			if allon:
				if cur[0]=="!":
					del mytokens[pos]
			else:
				if cur[0]=="!":
					if ( cur[1:] in mydefines ) and (pos<len(mytokens)):
						del mytokens[pos]
						continue
				elif ( cur not in mydefines ) and (pos<len(mytokens)):
					del mytokens[pos]
					continue
		pos=pos+1
	return mytokens
198
def flatten(mytokens):
	"""this function now turns a [1,[2,3]] list into a [1,2,3] list and returns it."""
	newlist=[]
	for x in mytokens:
		if isinstance(x,list):
			#recurse into nested lists
			newlist.extend(flatten(x))
		else:
			newlist.append(x)
	return newlist
208
209 #beautiful directed graph object
210
211 class digraph:
212 def __init__(self):
213 self.dict={}
214 #okeys = keys, in order they were added (to optimize firstzero() ordering)
215 self.okeys=[]
216
217 def addnode(self,mykey,myparent):
218 if not self.dict.has_key(mykey):
219 self.okeys.append(mykey)
220 if myparent==None:
221 self.dict[mykey]=[0,[]]
222 else:
223 self.dict[mykey]=[0,[myparent]]
224 self.dict[myparent][0]=self.dict[myparent][0]+1
225 return
226 if not myparent in self.dict[mykey][1]:
227 self.dict[mykey][1].append(myparent)
228 self.dict[myparent][0]=self.dict[myparent][0]+1
229
230 def delnode(self,mykey):
231 if not self.dict.has_key(mykey):
232 return
233 for x in self.dict[mykey][1]:
234 self.dict[x][0]=self.dict[x][0]-1
235 del self.dict[mykey]
236 while 1:
237 try:
238 self.okeys.remove(mykey)
239 except ValueError:
240 break
241
242 def firstzero(self):
243 "returns first node with zero references, or NULL if no such node exists"
244 for x in self.okeys:
245 if self.dict[x][0]==0:
246 return x
247 return None
248
249 def empty(self):
250 if len(self.dict)==0:
251 return 1
252 return 0
253
254 def hasnode(self,mynode):
255 return self.dict.has_key(mynode)
256
257 def copy(self):
258 mygraph=digraph()
259 for x in self.dict.keys():
260 mygraph.dict[x]=self.dict[x][:]
261 mygraph.okeys=self.okeys[:]
262 return mygraph
263
# valid end of version components; integers specify offset from release version
# pre=prerelease, p=patchlevel (should always be followed by an int), rc=release candidate
# all but _p (where it is required) can be followed by an optional trailing integer
endversion={"pre":-2,"p":0,"alpha":-4,"beta":-3,"rc":-1}

# as there's no reliable way to set {}.keys() order,
# endversion_keys is used instead of endversion.keys()
# to get a fixed search order, so that "pre" is checked
# before "p"
endversion_keys = ["pre", "p", "alpha", "beta", "rc"]
274
275 #parse /etc/env.d and generate /etc/profile.env
276
277 def env_update():
278 global root
279 if not os.path.exists(root+"etc/env.d"):
280 prevmask=os.umask(0)
281 os.makedirs(root+"etc/env.d",0755)
282 os.umask(prevmask)
283 fns=os.listdir(root+"etc/env.d")
284 fns.sort()
285 pos=0
286 while (pos<len(fns)):
287 if fns[pos]<=2:
288 del fns[pos]
289 continue
290 if (fns[pos][0] not in string.digits) or (fns[pos][1] not in string.digits):
291 del fns[pos]
292 continue
293 pos=pos+1
294
295 specials={"KDEDIRS":[],"PATH":[],"CLASSPATH":[],"LDPATH":[],"MANPATH":[],"INFODIR":[],"ROOTPATH":[]}
296 env={}
297
298 for x in fns:
299 # don't process backup files
300 if x[-1]=='~' or x[-4:]==".bak":
301 continue
302 myconfig=getconfig(root+"etc/env.d/"+x)
303 if myconfig==None:
304 print "!!! Parsing error in",root+"etc/env.d/"+x
305 #parse error
306 continue
307 # process PATH, CLASSPATH, LDPATH
308 for myspec in specials.keys():
309 if myconfig.has_key(myspec):
310 if myspec=="LDPATH":
311 specials[myspec].extend(string.split(expand(myconfig[myspec]),":"))
312 else:
313 specials[myspec].append(expand(myconfig[myspec]))
314 del myconfig[myspec]
315 # process all other variables
316 for myenv in myconfig.keys():
317 env[myenv]=expand(myconfig[myenv])
318
319 if os.path.exists(root+"etc/ld.so.conf"):
320 myld=open(root+"etc/ld.so.conf")
321 myldlines=myld.readlines()
322 myld.close()
323 oldld=[]
324 for x in myldlines:
325 #each line has at least one char (a newline)
326 if x[0]=="#":
327 continue
328 oldld.append(x[:-1])
329 oldld.sort()
330 # os.rename(root+"etc/ld.so.conf",root+"etc/ld.so.conf.bak")
331 # Where is the new ld.so.conf generated? (achim)
332 else:
333 oldld=None
334 specials["LDPATH"].sort()
335 if (oldld!=specials["LDPATH"]):
336 #ld.so.conf needs updating and ldconfig needs to be run
337 newld=open(root+"etc/ld.so.conf","w")
338 newld.write("# ld.so.conf autogenerated by env-update; make all changes to\n")
339 newld.write("# contents of /etc/env.d directory\n")
340 for x in specials["LDPATH"]:
341 newld.write(x+"\n")
342 newld.close()
343 #run ldconfig here
344 print ">>> Regenerating "+root+"etc/ld.so.cache..."
345 getstatusoutput("/sbin/ldconfig -r "+root)
346 del specials["LDPATH"]
347
348 #create /etc/profile.env for bash support
349 outfile=open(root+"/etc/profile.env","w")
350
351 for path in specials.keys():
352 if len(specials[path])==0:
353 continue
354 outstring="export "+path+"='"
355 for x in specials[path][:-1]:
356 outstring=outstring+x+":"
357 outstring=outstring+specials[path][-1]+"'"
358 outfile.write(outstring+"\n")
359 #get it out of the way
360 # del specials[path]
361
362 #create /etc/profile.env
363 for x in env.keys():
364 if type(env[x])!=types.StringType:
365 continue
366 outfile.write("export "+x+"='"+env[x]+"'\n")
367 outfile.close()
368
369 #creat /etc/csh.env for (t)csh support
370 outfile=open(root+"/etc/csh.env","w")
371
372 for path in specials.keys():
373 if len(specials[path])==0:
374 continue
375 outstring="setenv "+path+" '"
376 for x in specials[path][:-1]:
377 outstring=outstring+x+":"
378 outstring=outstring+specials[path][-1]+"'"
379 outfile.write(outstring+"\n")
380 #get it out of the way
381 del specials[path]
382
383 #create /etc/csh.env
384 for x in env.keys():
385 if type(env[x])!=types.StringType:
386 continue
387 outfile.write("setenv "+x+" '"+env[x]+"'\n")
388 outfile.close()
389
def grabfile(myfilename):
	"""This function grabs the lines in a file, normalizes whitespace and returns lines in a list; if a line
	begins with a #, it is ignored, as are empty lines.  Returns [] when the
	file can't be opened."""
	try:
		myfile=open(myfilename,"r")
	except IOError:
		return []
	mylines=myfile.readlines()
	myfile.close()
	newlines=[]
	for x in mylines:
		#the split/join thing removes leading and trailing whitespace, and converts any whitespace in the line
		#into single spaces.
		myline=" ".join(x.split())
		if not len(myline):
			continue
		if myline[0]=="#":
			continue
		newlines.append(myline)
	return newlines
411
def grabdict(myfilename):
	"""This function grabs the lines in a file, normalizes whitespace and returns lines in a dictionary
	mapping the first word of each line to the list of remaining words.
	Lines with fewer than two words are skipped; an unreadable file yields {}."""
	newdict={}
	try:
		myfile=open(myfilename,"r")
	except IOError:
		return newdict
	mylines=myfile.readlines()
	myfile.close()
	for x in mylines:
		#the split removes leading/trailing whitespace and collapses internal runs
		myline=x.split()
		if len(myline)<2:
			continue
		newdict[myline[0]]=myline[1:]
	return newdict
429
def writedict(mydict,myfilename,writekey=1):
	"""Writes out a dict to a file; writekey=0 mode doesn't write out the key and assumes all values are strings,
	not lists.  Returns 1 on success, 0 when the file can't be opened."""
	try:
		out=open(myfilename,"w")
	except IOError:
		return 0
	if writekey:
		#one "key val val ..." line per entry
		for key, vals in mydict.items():
			out.write(key+" ")
			for v in vals:
				out.write(v+" ")
			out.write("\n")
	else:
		#values only, one per line
		for val in mydict.values():
			out.write(val+"\n")
	out.close()
	return 1
448
449 def getconfig(mycfg,tolerant=0):
450 mykeys={}
451 f=open(mycfg,'r')
452 lex=shlex.shlex(f)
453 lex.wordchars=string.digits+string.letters+"~!@#$%*_\:;?,./-+{}"
454 lex.quotes="\"'"
455 while 1:
456 key=lex.get_token()
457 if (key==''):
458 #normal end of file
459 break;
460 equ=lex.get_token()
461 if (equ==''):
462 #unexpected end of file
463 #lex.error_leader(self.filename,lex.lineno)
464 if not tolerant:
465 print "!!! Unexpected end of config file: variable",key
466 return None
467 else:
468 return mykeys
469 elif (equ!='='):
470 #invalid token
471 #lex.error_leader(self.filename,lex.lineno)
472 if not tolerant:
473 print "!!! Invalid token (not \"=\")",equ
474 return None
475 else:
476 return mykeys
477 val=lex.get_token()
478 if (val==''):
479 #unexpected end of file
480 #lex.error_leader(self.filename,lex.lineno)
481 if not tolerant:
482 print "!!! Unexpected end of config file: variable",key
483 return None
484 else:
485 return mykeys
486 mykeys[key]=val
487 return mykeys
488
def expand(mystring,dictlist=[]):
	"""
	Variable-expansion code: strips shell quoting, processes backslash
	escapes, and substitutes $VAR / ${VAR} references.  Substitution values
	currently come from the global settings object; dictlist is reserved
	for future use.  Returns "" when a variable reference is malformed or
	runs to the end of the string.
	"""
	# a sentinel space is prepended so s[i-1] is always a valid lookback
	s=" "+mystring
	in_single=0
	in_double=0
	i=1
	out=" "
	while i<len(s):
		c=s[i]
		if c=="'" and s[i-1]!="\\":
			if in_double:
				#a single quote inside double quotes is literal
				out=out+"'"
			else:
				in_single=not in_single
			i=i+1
			continue
		if c=='"' and s[i-1]!="\\":
			if in_single:
				#a double quote inside single quotes is literal
				out=out+'"'
			else:
				in_double=not in_double
			i=i+1
			continue
		if in_single:
			#no expansion inside single quotes
			out=out+c
			i=i+1
			continue
		if c=="\\":
			#backslash escape
			if i+1>=len(s):
				out=out+c
				break
			nxt=s[i+1]
			i=i+2
			if nxt=='a':
				out=out+chr(7)
			elif nxt=='b':
				out=out+chr(8)
			elif nxt=='e':
				out=out+chr(27)
			elif (nxt=='f') or (nxt=='n'):
				out=out+chr(10)
			elif nxt=='r':
				out=out+chr(13)
			elif nxt=='t':
				out=out+chr(9)
			elif nxt=='v':
				out=out+chr(11)
			else:
				#as bash does: drop the backslash, keep the escaped char
				#(covers \\ and \' and \" as well)
				out=out+nxt
			continue
		if c=="$" and s[i-1]!="\\":
			#variable reference
			i=i+1
			if (i+1)>=len(s):
				return ""
			if s[i]=="{":
				i=i+1
				terminus="}"
			else:
				terminus=string.whitespace
			start=i
			while s[i] not in terminus:
				if (i+1)>=len(s):
					return ""
				i=i+1
			varname=s[start:i]
			i=i+1
			if len(varname)==0:
				return ""
			out=out+settings[varname]
			continue
		out=out+c
		i=i+1
	return out[1:]
571
def autouse(myvartree):
	"returns set of USE variables auto-enabled due to packages being installed"
	if profiledir==None:
		return ""
	defaults=grabfile(profiledir+"/use.defaults")
	if not defaults:
		return ""
	enabled=""
	for line in defaults:
		fields=line.split()
		if len(fields)<2:
			#invalid line
			continue
		flag=fields[0]
		dep=line[len(fields[0]):]
		#check dependencies; tell depcheck() to ignore settings["USE"] since we are still forming it.
		result=myvartree.depcheck(dep,lookatuse=0)
		if result[0]==1 and not result[1]:
			#deps satisfied, add USE variable...
			enabled=enabled+" "+flag
	return enabled
593
594 # returns a tuple. (version[string], error[string])
595 # They are pretty much mutually exclusive.
596 # Either version is a string and error is none, or
597 # version is None and error is a string
598 #
599 def ExtractKernelVersion(base_dir):
600 pathname = os.path.join(base_dir, 'include/linux/version.h')
601 try:
602 lines = open(pathname, 'r').readlines()
603 except OSError, details:
604 return (None, str(details))
605 except IOError, details:
606 return (None, str(details))
607
608 lines = map(string.strip, lines)
609
610 version = ''
611
612 for line in lines:
613 items = string.split(line, ' ', 2)
614 if items[0] == '#define' and \
615 items[1] == 'UTS_RELEASE':
616 version = items[2] # - may be wrapped in quotes
617 break
618
619 if version == '':
620 return (None, "Unable to locate UTS_RELEASE in %s" % (pathname))
621
622 if version[0] == '"' and version[-1] == '"':
623 version = version[1:-1]
624 return (version,None)
625
class config:
	"""Layered Portage configuration.

	Lookup (__getitem__) consults, in priority order: the process
	environment, /etc/make.conf, profile make.defaults, /etc/make.globals.
	USE and FEATURES are cumulative variables with "-flag"/"-*" removal
	semantics, rebuilt by regenerate()/use_regenerate().
	"""
	def __init__(self):
		self.configdict={}
		self.configdict["origenv"]=os.environ.copy()
		#make sure KV is available: derive it from the installed kernel
		#sources when the environment doesn't already provide it
		if 'KV' not in os.environ:
			(KV,err) = ExtractKernelVersion('/usr/src/linux')
			if KV != None:
				self.configdict["origenv"]['KV'] = KV
			#else: couldn't extract kernel version; KV stays unset

		#USE/FEATURES from the real environment are re-applied on top
		#during regenerate()
		self.configdict["backupenv"]={}
		if "FEATURES" in os.environ:
			self.configdict["backupenv"]["FEATURES"]=os.environ["FEATURES"]
		if "USE" in os.environ:
			self.configdict["backupenv"]["USE"]=os.environ["USE"]
		self.populated=0

	def use_regenerate(self):
		"regenerate USE variable -- dynamically taking into account any new packages installed (auto option)"
		self.configdict["auto"]={}
		self.configdict["auto"]["USE"]=autouse(db[root]["vartree"])
		mydb=[]
		#USE_ORDER controls which config layers participate, and in what order
		for x in self.usevaluelist:
			if x in self.configdict:
				mydb.append(self.configdict[x])
		self.regenerate("USE",mydb)

	def regenerate(self,mykey,myorigdb):
		"dynamically regenerate a cumulative variable that may have changed"
		if mykey in self.configdict["backupenv"]:
			#restore the value the user's environment originally supplied
			self.configdict["env"][mykey]=self.configdict["backupenv"][mykey]
		mysetting=[]
		#copy our myorigdb so we don't modify it.
		mydb=myorigdb[:]
		#cycle backwards through the db entries (lowest priority first)
		mydb.reverse()
		for curdb in mydb:
			if mykey not in curdb:
				continue
			#expand using only the current config file/db entry
			mysplit=expand(curdb[mykey],curdb).split()
			for x in mysplit:
				if x=="-*":
					# "-*" is a special "minus" var that means "unset all settings".  so USE="-* gnome" will have *just* gnome enabled.
					mysetting=[]
				elif x[0]!="-":
					if not x in mysetting:
						mysetting.append(x)
				else:
					#"-flag": remove every occurrence of flag
					while x[1:] in mysetting:
						mysetting.remove(x[1:])
		self[mykey]=string.join(mysetting," ")

	def populate(self):
		"load the config files and compute the layered lookup list"
		self.configdict["conf"]=getconfig("/etc/make.conf")
		self.configdict["globals"]=getconfig("/etc/make.globals")
		self.configdict["env"]=self.configdict["origenv"].copy()
		if not profiledir:
			self.configdict["defaults"]={}
		else:
			self.configdict["defaults"]=getconfig(profiledir+"/make.defaults")
		self.configlist=[self.configdict["env"],self.configdict["conf"],self.configdict["defaults"],self.configdict["globals"]]
		self.populated=1
		useorder=self["USE_ORDER"]
		if not useorder:
			#reasonable defaults; this is important as without USE_ORDER, USE will always be "" (nothing set)!
			useorder="env:conf:auto:defaults"
		self.usevaluelist=useorder.split(":")
		# cumulative Portage variables with "-" support: USE and FEATURES
		# use "standard" variable regeneration code to initially set the cumulative FEATURES variable
		self.regenerate("FEATURES",self.configlist)
		# use specialized code for regenerating the cumulative and dynamic USE setting.
		self.use_regenerate()
		# USE doesn't consult make.globals while FEATURES does.

	def __getitem__(self,mykey):
		"return the highest-priority setting for mykey, '' if unset"
		if not self.populated:
			self.populate()
		if mykey=="CONFIG_PROTECT_MASK":
			#Portage needs to always auto-update these files (so that builds don't die when remerging gcc)
			returnme="/etc/env.d "
		else:
			returnme=""
		for x in self.configlist:
			if mykey in x:
				returnme=returnme+expand(x[mykey],self.configlist)
				#without this break, it concats all settings together -- interesting!
				break
		return returnme

	def has_key(self,mykey):
		"1 if mykey is set in any layer, else 0"
		if not self.populated:
			self.populate()
		for x in self.configlist:
			if mykey in x:
				return 1
		return 0

	def keys(self):
		"all keys defined in any layer, without duplicates"
		if not self.populated:
			self.populate()
		mykeys=[]
		for x in self.configlist:
			for y in x.keys():
				if y not in mykeys:
					mykeys.append(y)
		return mykeys

	def __setitem__(self,mykey,myvalue):
		"set a value; will be thrown away at reset() time"
		if not self.populated:
			self.populate()
		self.configlist[0][mykey]=myvalue

	def reset(self):
		"reset environment to original settings"
		if not self.populated:
			self.populate()
		self.configdict["env"]=self.configdict["origenv"].copy()
		#rebuild the cumulative variables from the pristine layers
		self.regenerate("FEATURES",self.configlist)
		self.use_regenerate()

	def environ(self):
		"return our locally-maintained environment"
		mydict={}
		for x in self.keys():
			mydict[x]=self[x]
		return mydict
757
def spawn(mystring,debug=0,free=0):
	"""spawn a subprocess with optional sandbox protection,
	depending on whether sandbox is enabled.  The "free" argument,
	when set to 1, will disable sandboxing.  This allows us to
	spawn processes that are supposed to modify files outside of the
	sandbox.  We can't use os.system anymore because it messes up
	signal handling.  Using spawn allows our Portage signal handler
	to work."""
	pid=os.fork()
	if pid==0:
		#child process
		if ("sandbox" in features) and not free:
			cmd="/usr/lib/portage/bin/sandbox"
			#debug has no effect under sandbox (both branches of the
			#original were identical)
			args=["sandbox",mystring]
		elif debug:
			cmd="/bin/bash"
			args=["bash","-x","-c",mystring]
		else:
			cmd="/bin/bash"
			args=["bash","-c",mystring]
		os.execve(cmd,args,settings.environ())
		# If the execve fails, we need to report it, and exit
		# *carefully* -- never return into the parent's stack
		os._exit(1)
		return # should never get reached
	status=os.waitpid(pid,0)[1]
	if (status & 0xff)==0:
		#normal termination: return the exit code
		return (status >> 8)
	#terminated by a signal
	return 16
794
def ebuildsh(mystring,debug=0,free=0):
	"""spawns ebuild.sh once per whitespace-separated phase in mystring,
	recording the current phase in the buildfase global; stops and returns
	the first non-zero exit code, else None."""
	global buildfase
	for phase in mystring.split():
		buildfase=phase
		#forward debug/free so they actually take effect (they were
		#previously accepted but silently ignored)
		retval=spawn("/usr/sbin/ebuild.sh "+phase,debug=debug,free=free)
		if retval:
			return retval
803
def getmycwd():
	"this handles situations where the current directory doesn't exist"
	try:
		return os.getcwd()
	except OSError:
		#cwd was deleted out from under us; fall back to the root dir
		#(was a bare except, which also swallowed unrelated errors)
		os.chdir("/")
		return os.getcwd()
812
813 def fetch(myuris):
814 "fetch files. Will use digest file if available."
815 if ("mirror" in features) and ("nomirror" in settings["RESTRICT"].split()):
816 print ">>> \"mirror\" mode and \"nomirror\" restriction enabled; skipping fetch."
817 return 1
818 global thirdpartymirrors
819 mymirrors=settings["GENTOO_MIRRORS"].split()
820 fetchcommand=settings["FETCHCOMMAND"]
821 resumecommand=settings["RESUMECOMMAND"]
822 fetchcommand=string.replace(fetchcommand,"${DISTDIR}",settings["DISTDIR"])
823 resumecommand=string.replace(resumecommand,"${DISTDIR}",settings["DISTDIR"])
824 mydigests=None
825 digestfn=settings["FILESDIR"]+"/digest-"+settings["PF"]
826 if os.path.exists(digestfn):
827 myfile=open(digestfn,"r")
828 mylines=myfile.readlines()
829 mydigests={}
830 for x in mylines:
831 myline=string.split(x)
832 if len(myline)<2:
833 #invalid line
834 continue
835 mydigests[myline[2]]={"md5":myline[1],"size":string.atol(myline[3])}
836 if "fetch" in settings["RESTRICT"].split():
837 # fetch is restricted. Ensure all files have already been downloaded; otherwise,
838 # print message and exit.
839 gotit=1
840 for myuri in myuris:
841 myfile=os.path.basename(myuri)
842 try:
843 mystat=os.stat(settings["DISTDIR"]+"/"+myfile)
844 except OSError:
845 # file does not exist
846 print "!!!",myfile,"not found in",settings["DISTDIR"]+"."
847 gotit=0
848 break
849 if not gotit:
850 print ">>>",settings["EBUILD"],"has fetch restriction turned on."
851 print ">>> This probably means that this ebuild's files must be downloaded"
852 print ">>> manually. See the comments in the ebuild for more information."
853 return 0
854 return 1
855 locations=mymirrors[:]
856 filedict={}
857 for myuri in myuris:
858 myfile=os.path.basename(myuri)
859 if not filedict.has_key(myfile):
860 filedict[myfile]=[]
861 for y in range(0,len(locations)):
862 filedict[myfile].append(locations[y]+"/distfiles/"+myfile)
863 if myuri[:9]=="mirror://":
864 eidx = myuri.find("/", 9)
865 if eidx != -1:
866 mirrorname = myuri[9:eidx]
867 if thirdpartymirrors.has_key(mirrorname):
868 for locmirr in thirdpartymirrors[mirrorname]:
869 filedict[myfile].append(locmirr+"/"+myuri[eidx+1:])
870 else:
871 filedict[myfile].append(myuri)
872 for myfile in filedict.keys():
873 locfetch=fetchcommand
874 docontinue=0
875 try:
876 mystat=os.stat(settings["DISTDIR"]+"/"+myfile)
877 if mydigests!=None and mydigests.has_key(myfile):
878 #if we have the digest file, we know the final size and can resume the download.
879 if mystat[ST_SIZE]<mydigests[myfile]["size"]:
880 print ">>> Resuming download..."
881 locfetch=resumecommand
882 else:
883 #we already have it downloaded, skip.
884 #if our file is bigger than the recorded size, digestcheck should catch it.
885 docontinue=1
886 pass
887 else:
888 #we don't have the digest file, but the file exists. Assume it is fully downloaded.
889 docontinue=1
890 pass
891 except OSError:
892 pass
893 if docontinue:
894 #you can't use "continue" when you're inside a "try" block
895 continue
896 gotit=0
897 for loc in filedict[myfile]:
898 print
899 print ">>> Downloading",loc
900 myfetch=string.replace(locfetch,"${URI}",loc)
901 myfetch=string.replace(myfetch,"${FILE}",myfile)
902 myret=spawn(myfetch,free=1)
903 if mydigests!=None and mydigests.has_key(myfile):
904 try:
905 mystat=os.stat(settings["DISTDIR"]+"/"+myfile)
906 if mystat[ST_SIZE]==mydigests[myfile]["size"]:
907 gotit=1
908 break
909 except OSError:
910 pass
911 else:
912 if not myret:
913 gotit=1
914 break
915 if not gotit:
916 print '!!! Couldn\'t download',myfile+". Aborting."
917 return 0
918 return 1
919
920 def digestgen(myarchives,overwrite=1):
921 """generates digest file if missing. Assumes all files are available. If
922 overwrite=0, the digest will only be created if it doesn't already exist."""
923 if not os.path.isdir(settings["FILESDIR"]):
924 os.makedirs(settings["FILESDIR"])
925 if "cvs" in features:
926 print ">>> Auto-adding files/ dir to CVS..."
927 spawn("cd "+settings["O"]+"; cvs add files",free=1)
928 myoutfn=settings["FILESDIR"]+"/.digest-"+settings["PF"]
929 myoutfn2=settings["FILESDIR"]+"/digest-"+settings["PF"]
930 if (not overwrite) and os.path.exists(myoutfn2):
931 return
932 print ">>> Generating digest file..."
933 outfile=open(myoutfn,"w")
934 for x in myarchives:
935 myfile=settings["DISTDIR"]+"/"+x
936 mymd5=perform_md5(myfile)
937 mysize=os.stat(myfile)[ST_SIZE]
938 #The [:-1] on the following line is to remove the trailing "L"
939 outfile.write("MD5 "+mymd5+" "+x+" "+`mysize`[:-1]+"\n")
940 outfile.close()
941 movefile(myoutfn,myoutfn2)
942 if "cvs" in features:
943 print ">>> Auto-adding digest file to CVS..."
944 spawn("cd "+settings["FILESDIR"]+"; cvs add digest-"+settings["PF"],free=1)
945 print ">>> Computed message digests."
946
947 def digestcheck(myarchives):
948 "Checks md5sums. Assumes all files have been downloaded."
949 if not myarchives:
950 #No archives required; don't expect a digest
951 return 1
952 digestfn=settings["FILESDIR"]+"/digest-"+settings["PF"]
953 if not os.path.exists(digestfn):
954 if "digest" in features:
955 print ">>> No message digest file found.",digestfn
956 print ">>> \"digest\" mode enabled; auto-generating new digest..."
957 digestgen(myarchives)
958 return 1
959 else:
960 print "!!! No message digest file found.",digestfn
961 print "!!! Type \"ebuild foo.ebuild digest\" to generate a digest."
962 return 0
963 myfile=open(digestfn,"r")
964 mylines=myfile.readlines()
965 mydigests={}
966 for x in mylines:
967 myline=string.split(x)
968 if len(myline)<2:
969 #invalid line
970 continue
971 mydigests[myline[2]]=[myline[1],myline[3]]
972 for x in myarchives:
973 if not mydigests.has_key(x):
974 if "digest" in features:
975 print ">>> No messages digest found for",x+"."
976 print ">>> \"digest\" mode enabled; auto-generating new digest..."
977 digestgen(myarchives)
978 return 1
979 else:
980 print "!!! No message digest found for",x+"."
981 print "!!! Type \"ebuild foo.ebuild digest\" to generate a digest."
982 return 0
983 mymd5=perform_md5(settings["DISTDIR"]+"/"+x)
984 if mymd5 != mydigests[x][0]:
985 print
986 print "!!!",x+": message digests do not match!"
987 print "!!!",x,"is corrupt or incomplete."
988 print ">>> our recorded digest:",mydigests[x][0]
989 print ">>> your file's digest:",mymd5
990 print ">>> Please delete",settings["DISTDIR"]+"/"+x,"and refetch."
991 print
992 return 0
993 else:
994 print ">>> md5 ;-)",x
995 return 1
996
# "checkdeps" support has been deprecated. Relying on emerge to handle it.
def doebuild(myebuild,mydo,myroot,debug=0):
	"""Run build phase 'mydo' for the .ebuild file 'myebuild' against root 'myroot'.

	Returns 0 on success, 1 on error.  Populates the global 'settings'
	config with the per-package variables (P, PN, PV, PR, PVR, PF and the
	various directory paths), fetches and digest-checks the sources, then
	dispatches to ebuild.sh / merge / unmerge as appropriate for mydo.
	"""
	global settings
	global buildfase
	#record the current phase in the module global (sic: 'buildfase')
	buildfase=mydo
	if not os.path.exists(myebuild):
		print "!!! doebuild:",myebuild,"not found."
		return 1
	if myebuild[-7:]!=".ebuild":
		print "!!! doebuild: ",myebuild,"does not appear to be an ebuild file."
		return 1
	#start from a clean config and derive all package-specific variables
	settings.reset()
	settings["PORTAGE_DEBUG"]=str(debug)
	#settings["ROOT"]=root
	settings["ROOT"]=myroot
	settings["STARTDIR"]=getmycwd()
	settings["EBUILD"]=os.path.abspath(myebuild)
	settings["O"]=os.path.dirname(settings["EBUILD"])
	#category is the name of the directory two levels up from the ebuild
	settings["CATEGORY"]=os.path.basename(os.path.normpath(settings["O"]+"/.."))
	#PEBUILD
	settings["FILESDIR"]=settings["O"]+"/files"
	#PF = "name-version[-rev]", i.e. the ebuild filename minus ".ebuild"
	settings["PF"]=os.path.basename(settings["EBUILD"])[:-7]
	settings["ECLASSDIR"]=settings["PORTDIR"]+"/eclass"
	settings["SANDBOX_LOG"]=settings["PF"]
	mysplit=pkgsplit(settings["PF"],0)
	if mysplit==None:
		print "!!! Error: PF is null; exiting."
		return 1
	settings["P"]=mysplit[0]+"-"+mysplit[1]
	settings["PN"]=mysplit[0]
	settings["PV"]=mysplit[1]
	settings["PR"]=mysplit[2]
	#PVR omits the revision when it is the implicit "r0"
	if mysplit[2]=="r0":
		settings["PVR"]=mysplit[1]
	else:
		settings["PVR"]=mysplit[1]+"-"+mysplit[2]
	settings["SLOT"]=""
	#make sure the portage helper scripts are on the PATH
	if settings.has_key("PATH"):
		mysplit=string.split(settings["PATH"],":")
	else:
		mysplit=[]
	if not "/usr/lib/portage/bin" in mysplit:
		settings["PATH"]="/usr/lib/portage/bin:"+settings["PATH"]

	if not settings.has_key("BUILD_PREFIX"):
		print "!!! Error: BUILD_PREFIX not defined."
		return 1
	#create the per-package build/temp/work/image directory layout
	settings["BUILDDIR"]=settings["BUILD_PREFIX"]+"/"+settings["PF"]
	if not os.path.exists(settings["BUILDDIR"]):
		os.makedirs(settings["BUILDDIR"])
	settings["T"]=settings["BUILDDIR"]+"/temp"
	if not os.path.exists(settings["T"]):
		os.makedirs(settings["T"])
	settings["WORKDIR"]=settings["BUILDDIR"]+"/work"
	settings["D"]=settings["BUILDDIR"]+"/image/"

	if mydo=="unmerge":
		return unmerge(settings["CATEGORY"],settings["PF"],myroot)

	if mydo not in ["help","clean","prerm","postrm","preinst","postinst","config","touch","setup",
		"depend","fetch","digest","unpack","compile","install","rpm","qmerge","merge","package"]:
		print "!!! Please specify a valid command."
		return 1

	# if any of these are being called, handle them and stop now.
	if mydo in ["help","clean","prerm","postrm","preinst","postinst","config","touch","setup"]:
		return ebuildsh(mydo)
	#initial ebuild.sh bash environment configured

	#regenerate the dependency cache entry when it is missing or older than the ebuild
	mydbkey="/var/cache/edb/dep/dep-"+os.path.basename(settings["EBUILD"])
	if (not os.path.exists(mydbkey)) or os.stat(mydbkey)[ST_MTIME]<os.stat(settings["EBUILD"])[ST_MTIME]:
		#cached info stale or non-existent
		myso=getstatusoutput("/usr/sbin/ebuild.sh depend")
		if myso[0]!=0:
			print "\n\n!!! Portage had a problem processing this file:"
			print "!!!",settings["EBUILD"]+"\n"+myso[1]+"\n"+"!!! aborting.\n"
			return 1
	if mydo=="depend":
		return 0
	# obtain the dependency, slot and SRC_URI information from the edb cache file
	#NOTE(review): eval() here trusts the contents of /var/cache/edb -- local, portage-written data
	a=open(mydbkey,"r")
	mydeps=eval(a.readline())
	a.close()

	# get possible slot information from the deps file
	settings["SLOT"]=mydeps[2]
	settings["RESTRICT"]=mydeps[4]
	# it's fetch time
	myuris=mydeps[3]
	#newuris honours the current USE flags; alluris evaluates every conditional
	newuris=flatten(evaluate(tokenize(myuris),string.split(settings["USE"])))
	alluris=flatten(evaluate(tokenize(myuris),[],1))
	alist=[]
	aalist=[]
	#uri processing list: build the unique archive basenames for A and AA
	upl=[[newuris,alist],[alluris,aalist]]
	for myl in upl:
		for x in myl[0]:
			mya=os.path.basename(x)
			if not mya in myl[1]:
				myl[1].append(mya)
	settings["A"]=string.join(alist," ")
	settings["AA"]=string.join(aalist," ")
	#in cvs mode, fetch and check every possible archive, not just the USE-selected ones
	if "cvs" in features:
		fetchme=alluris
		checkme=aalist
	else:
		fetchme=newuris
		checkme=alist

	if not fetch(fetchme):
		return 1

	if mydo=="fetch":
		return 0

	if "digest" in features:
		#generate digest if it doesn't exist.
		if mydo=="digest":
			digestgen(checkme,overwrite=1)
			return 0
		else:
			digestgen(checkme,overwrite=0)
	elif mydo=="digest":
		#since we are calling "digest" directly, recreate the digest even if it already exists
		digestgen(checkme,overwrite=1)
		return 0

	if not digestcheck(checkme):
		return 1

	#initial dep checks complete; time to process main commands

	#map each build command to the full phase sequence it implies
	actionmap={ "unpack":"unpack",
		"compile":"setup unpack compile",
		"install":"setup unpack compile install",
		"rpm":"setup unpack compile install rpm"
	}
	if mydo in actionmap.keys():
		if "noauto" in features:
			#noauto: run only the requested phase, not its prerequisites
			return ebuildsh(mydo)
		else:
			return ebuildsh(actionmap[mydo])
	elif mydo=="qmerge":
		#qmerge is specifically not supposed to do a runtime dep check
		return merge(settings["CATEGORY"],settings["PF"],settings["D"],settings["BUILDDIR"]+"/build-info",myroot)
	elif mydo=="merge":
		retval=ebuildsh("setup unpack compile install")
		if retval: return retval
		return merge(settings["CATEGORY"],settings["PF"],settings["D"],settings["BUILDDIR"]+"/build-info",myroot,myebuild=settings["EBUILD"])
	elif mydo=="package":
		retval=ebuildsh("setup")
		if retval:
			return retval
		#make sure PKGDIR and its category/All subdirs exist
		for x in ["","/"+settings["CATEGORY"],"/All"]:
			if not os.path.exists(settings["PKGDIR"]+x):
				os.makedirs(settings["PKGDIR"]+x)
		pkgloc=settings["PKGDIR"]+"/All/"+settings["PF"]+".tbz2"
		#rebuild only when an archive or the ebuild is newer than the existing package
		rebuild=0
		if os.path.exists(pkgloc):
			for x in [settings["A"],settings["EBUILD"]]:
				if not os.path.exists(x):
					continue
				if os.path.getmtime(x)>os.path.getmtime(pkgloc):
					rebuild=1
					break
		else:
			rebuild=1
		if not rebuild:
			print
			print ">>> Package",settings["PF"]+".tbz2 appears to be up-to-date."
			print ">>> To force rebuild, touch",os.path.basename(settings["EBUILD"])
			print
			return 0
		else:
			return ebuildsh("unpack compile install package")
1172
def isfifo(x):
	"""Return nonzero when path x itself is a fifo (named pipe).

	Uses lstat, so a symlink -- even one pointing at a fifo -- reports 0."""
	mymode=os.lstat(x)[ST_MODE]
	if not S_ISLNK(mymode):
		return S_ISFIFO(mymode)
	return 0
1178
# Maps a directory path to its os.path.realpath() result so repeated
# installs into the same directory resolve its symlinks only once.
expandcache={}

def expandpath(realroot,mypath):
	"""The purpose of this function is to resolve the 'real' path on disk, with all
	symlinks resolved except for the basename, since we may be installing a symlink
	and definitely don't want it expanded.  In fact, the file that we want to install
	doesn't need to exist; just the dirname.

	Returns realpath(dirname(mypath)) + "/" + basename(mypath).
	NOTE(review): realroot is unused -- confirm against callers."""
	global expandcache
	split=mypath.split("/")
	join="/".join(split[:-1])
	try:
		return expandcache[join]+'/'+split[-1]
	except KeyError:
		pass
	expandcache[join]=os.path.realpath(join)
	#bug fix: on a cache miss the original returned only the expanded
	#dirname, silently dropping the basename; append it exactly as the
	#cache-hit path above does.
	return expandcache[join]+'/'+split[-1]
1195
def movefile(src,dest,newmtime=None,sstat=None):
	"""moves a file from src to dest, preserving all permissions and attributes; mtime will
	be preserved even when moving across filesystems.  Move is atomic.

	Returns the resulting mtime on success (newmtime when supplied, except
	for symlinks, whose mtime is fixed at creation time) and None on
	failure.  sstat may be passed in to avoid re-lstat()ing src."""

	#implementation note: we may want to try doing a simple rename() first, and fall back
	#to the "hard link shuffle" only if that doesn't work. We now do the hard-link shuffle
	#for everything.

	try:
		dstat=os.lstat(dest)
		destexists=1
	except:
		#stat the directory for same-filesystem testing purposes
		dstat=os.lstat(os.path.dirname(dest))
		destexists=0
	if sstat==None:
		sstat=os.lstat(src)
	# symlinks have to be handled special
	if S_ISLNK(sstat[ST_MODE]):
		# if destexists, toss it, then call os.symlink, shutil.copystat(src,dest)
		# *real* src
		if destexists:
			try:
				os.unlink(dest)
			except:
				print "!!! couldn't unlink",dest
				# uh oh. oh well
				return None

		try:
			real_src = os.readlink(src)
		except:
			print "!!! couldn't readlink",src
			return None
		try:
			#recreate the link at dest pointing at the same target
			os.symlink(real_src,dest)
		except:
			print "!!! couldn't symlink",real_src,"->",dest
			return None
		try:
			#missingos: presumably portage's extension supplying lchown -- TODO confirm
			missingos.lchown(dest,sstat[ST_UID],sstat[ST_GID])
		except:
			print "!!! couldn't set uid/gid on",dest
		#the mtime of a symbolic link can only be set at create time.
		#thus, we return the mtime of the symlink (which was set when we created it)
		#so it can be recorded in the package db if necessary.
		return os.lstat(dest)[ST_MTIME]

	if not destexists:
		if sstat[ST_DEV]==dstat[ST_DEV]:
			#same filesystem: a plain rename is atomic and keeps attributes
			try:
				os.rename(src,dest)
				if newmtime:
					os.utime(dest,(newmtime,newmtime))
					return newmtime
				else:
					#renaming doesn't change mtimes, so we can return the source mtime:
					return sstat[ST_MTIME]
			except:
				return None
		else:
			if S_ISCHR(sstat[ST_MODE]) or S_ISBLK(sstat[ST_MODE]) or S_ISFIFO(sstat[ST_MODE]):
				#we don't yet handle special files across filesystems, so we need to fall back to /bin/mv
				a=getstatusoutput("/bin/mv -f "+"'"+src+"' '"+dest+"'")
				if a[0]!=0:
					return None
					#failure
				if newmtime:
					os.utime(dest, (newmtime,newmtime))
					return newmtime
				else:
					#get actual mtime from copied file, since we can't specify an mtime using mv
					finalstat=os.lstat(dest)
					return finalstat[ST_MTIME]
			#not on same fs and a regular file
			try:
				shutil.copyfile(src,dest)
				try:
					missingos.lchown(dest, sstat[ST_UID], sstat[ST_GID])
				except:
					print "!!! couldn't set uid/gid on",dest
				# do chmod after chown otherwise the sticky bits are reset
				os.chmod(dest, S_IMODE(sstat[ST_MODE]))
				if not newmtime:
					os.utime(dest, (sstat[ST_ATIME], sstat[ST_MTIME]))
					returnme=sstat[ST_MTIME]
				else:
					os.utime(dest, (newmtime,newmtime))
					returnme=newmtime
				#copy succeeded; remove the source to complete the "move"
				os.unlink(src)
				return returnme
			except:
				#copy failure
				return None
	# destination exists, do our "backup plan":
	# hard-link dest aside, put the new file in place under a temp name,
	# then swap it in and drop the backup.
	destnew=dest+"#new#"
	destorig=dest+"#orig#"
	try:
		# make a hard link backup
		os.link(dest,destorig)
	except:
		#backup failure
		print "!!! link fail 1 on",dest,"->",destorig
		destorig=None
	#copy destnew file into place
	if sstat[ST_DEV]==dstat[ST_DEV]:
		#on the same fs
		try:
			os.rename(src,destnew)
		except:
			print "!!! rename fail 1 on",src,"->",destnew
			if destorig:
				os.unlink(destorig)
			return None
	else:
		#not on same fs
		try:
			shutil.copyfile(src,destnew)
		except OSError, details:
			print '!!! copy',src,'->',destnew,'failed -',details
			return None
		except:
			#copy failure
			print "!!! copy fail 1 on",src,"->",destnew
			# gotta remove destorig *and* destnew
			if destorig:
				os.unlink(destorig)
			return None
		try:
			os.unlink(src)
		except:
			print "!!! unlink fail 1 on",src
			# gotta remove dest+#orig# *and destnew
			os.unlink(destnew)
			if destorig:
				os.unlink(destorig)
			return None
	#destination exists, destnew file is in place on the same filesystem
	#update ownership on destnew
	try:
		missingos.lchown(destnew, sstat[ST_UID], sstat[ST_GID])
	except:
		print "!!! couldn't set uid/gid on",dest
	#update perms on destnew
	# do chmod after chown otherwise the sticky bits are reset
	try:
		os.chmod(destnew, S_IMODE(sstat[ST_MODE]))
	except:
		print "!!! chmod fail on",dest
	#update times on destnew
	if not newmtime:
		try:
			os.utime(destnew, (sstat[ST_ATIME], sstat[ST_MTIME]))
		except:
			print "!!! couldn't set times on",destnew
		returnme=sstat[ST_MTIME]
	else:
		try:
			os.utime(destnew, (newmtime,newmtime))
		except:
			print "!!! couldn't set times on",destnew
		returnme=newmtime
	try:
		os.unlink(dest) # scary!
	except:
		# gotta remove destorig *and destnew
		print "!!! unlink fail 1 on",dest
		if destorig:
			os.unlink(destorig)
		os.unlink(destnew)
		return None
	try:
		os.rename(destnew,dest)
	except:
		#os.rename guarantees to leave dest in place if the rename fails.
		print "!!! rename fail 2 on",destnew,"->",dest
		os.unlink(destnew)
		return None
	try:
		if destorig:
			os.unlink(destorig)
	except:
		print "!!! unlink fail 1 on",destorig
	return returnme
1381
def getmtime(x):
	"""Return the modification time of path x (not following symlinks) as a string.

	Kept as a string since callers compare/store these values textually."""
	#repr() replaces the deprecated backquote syntax; index -2 of the
	#lstat result is st_mtime, identical to the original behaviour.
	return repr(os.lstat(x)[-2])
1384
def perform_md5(x):
	"""Return just the md5 digest of file x -- the first element of
	perform_checksum()'s result (second element unused here)."""
	return perform_checksum(x)[0]
1387
def pathstrip(x,mystart):
	"""Strip the common prefix of x and mystart from x.

	Returns [root + remainder-without-leading-slash, remainder-with-leading-slash].
	NOTE(review): uses the module-global 'root' rather than a parameter, and
	os.path.commonprefix compares character-wise (not path-component-wise) --
	verify callers only pass mystart values that are exact path prefixes of x.
	"""
	cpref=os.path.commonprefix([x,mystart])
	return [root+x[len(cpref)+1:],x[len(cpref):]]
1391
def merge(mycat,mypkg,pkgloc,infloc,myroot,myebuild=None):
	"""Merge the package image at pkgloc (with build info from infloc)
	into myroot, creating the package's db entry if needed."""
	db_entry=dblink(mycat,mypkg,myroot)
	if not db_entry.exists():
		db_entry.create()
	db_entry.merge(pkgloc,infloc,myroot,myebuild)
1398
def unmerge(cat,pkg,myroot):
	"""Unmerge cat/pkg from myroot via its db entry, then delete the entry.
	Does nothing when no db entry exists."""
	db_entry=dblink(cat,pkg,myroot)
	if db_entry.exists():
		db_entry.unmerge()
		db_entry.delete()
1404
def getenv(mykey,dictlist=None):
	"""Look up mykey in each dict of dictlist (earliest wins), falling back
	to os.environ; the found value is recursively expanded via expand()
	against the same dict list.  Returns "" when the key is found nowhere."""
	#bug fix: the old mutable default ([]) was appended to on every call,
	#accumulating one extra copy of os.environ per invocation; also avoid
	#mutating a caller-supplied list.
	if dictlist is None:
		dictlist=[]
	mydicts=dictlist+[os.environ]
	for x in mydicts:
		if mykey in x:
			return expand(x[mykey],mydicts)
	return ""
1412
def relparse(myver):
	"""Convert the last version component into [number, suffix-rank, suffix-number].

	"1.0" -> [1.0, 0, 0]; "3a" -> [3.0, ord('a'), 0]; "2_alpha4" ranks the
	suffix via the endversion table, giving [2.0, endversion["alpha"], 4.0].
	"""
	number=0
	p1=0
	p2=0
	mynewver=myver.split("_")
	matched=0
	if len(mynewver)==2:
		#candidate endversion suffix, e.g. "_alpha4"
		number=float(mynewver[0])
		for suffix in endversion_keys:
			elen=len(suffix)
			if mynewver[1][:elen]==suffix:
				matched=1
				p1=endversion[suffix]
				try:
					p2=float(mynewver[1][elen:])
				except ValueError:
					#suffix with no trailing number (e.g. "_alpha")
					p2=0
				break
	if not matched:
		#normal number, possibly with a single trailing letter ("3a");
		#this branch appeared verbatim twice in the original -- merged here.
		#float() replaces the deprecated string.atof().
		divider=len(myver)-1
		if myver[divider:] not in "1234567890":
			#letter at end
			p1=ord(myver[divider:])
			number=float(myver[0:divider])
		else:
			number=float(myver)
	return [number,p1,p2]
1452
def revverify(myrev):
	"""Return 1 when myrev is a valid revision string -- the letter "r"
	followed by an integer, e.g. "r3" -- else 0."""
	if len(myrev)==0:
		return 0
	if myrev[0]!="r":
		return 0
	try:
		#int() replaces the deprecated string.atoi(); only catch the
		#conversion failure instead of the original bare except
		int(myrev[1:])
	except ValueError:
		return 0
	return 1
1463
1464 #returns 1 if valid version string, else 0
1465 # valid string in format: <v1>.<v2>...<vx>[a-z,_{endversion}[vy]]
1466 # ververify doesn't do package rev.
1467
def ververify(myorigval,silent=1):
	"""Return 1 when myorigval is a valid version string, else 0.

	Valid format: dotted numeric components, where only the last component
	may additionally end in a single lowercase letter ("1.0b") or carry an
	_suffix from endversion_keys with an optional trailing number
	("1_alpha2").  Revision parts ("-r1") are not handled here.
	Diagnostics are printed only when silent is false.
	"""
	if len(myorigval)==0:
		if not silent:
			print "!!! Name error: package contains empty \"-\" part."
		return 0
	myval=string.split(myorigval,'.')
	if len(myval)==0:
		if not silent:
			print "!!! Name error: empty version string."
		return 0
	#all but the last version must be a numeric
	for x in myval[:-1]:
		if not len(x):
			if not silent:
				print "!!! Name error in",myorigval+": two decimal points in a row"
			return 0
		try:
			foo=string.atoi(x)
		except:
			if not silent:
				print "!!! Name error in",myorigval+": \""+x+"\" is not a valid version component."
			return 0
	if not len(myval[-1]):
		if not silent:
			print "!!! Name error in",myorigval+": two decimal points in a row"
		return 0
	#simplest case: the last component is a plain number
	try:
		foo=string.atoi(myval[-1])
		return 1
	except:
		pass
	#ok, our last component is not a plain number or blank, let's continue
	if myval[-1][-1] in string.lowercase:
		try:
			foo=string.atoi(myval[-1][:-1])
			return 1
			# 1a, 2.0b, etc.
		except:
			pass
	#ok, maybe we have a 1_alpha or 1_beta2; let's see
	#ep="endpart"
	ep=string.split(myval[-1],"_")
	if len(ep)!=2:
		if not silent:
			print "!!! Name error in",myorigval
		return 0
	try:
		foo=string.atoi(ep[0])
	except:
		#this needs to be numeric, i.e. the "1" in "1_alpha"
		if not silent:
			print "!!! Name error in",myorigval+": characters before _ must be numeric"
		return 0
	#the suffix must be a known endversion key, optionally followed by a number
	for mye in endversion_keys:
		if ep[1][0:len(mye)]==mye:
			if len(mye)==len(ep[1]):
				#no trailing numeric; ok
				return 1
			else:
				try:
					foo=string.atoi(ep[1][len(mye):])
					return 1
				except:
					#if no endversions work, *then* we return 0
					pass
	if not silent:
		print "!!! Name error in",myorigval
	return 0
1536
def isjustname(mypkg):
	"""Return 1 when mypkg is a bare package name with no version-looking
	'-' separated part, else 0."""
	for part in mypkg.split('-'):
		if ververify(part):
			return 0
	return 1
1543
def isspecific(mypkg):
	"""Return 1 when mypkg names a specific versioned package
	("cat/pkg-version"), else 0."""
	chunks=mypkg.split("/")
	if len(chunks)==2 and not isjustname(chunks[1]):
		return 1
	return 0
1550
1551 # This function can be used as a package verification function, i.e.
1552 # "pkgsplit("foo-1.2-1") will return None if foo-1.2-1 isn't a valid
1553 # package (with version) name. If it is a valid name, pkgsplit will
1554 # return a list containing: [ pkgname, pkgversion(norev), pkgrev ].
1555 # For foo-1.2-1, this list would be [ "foo", "1.2", "1" ]. For
1556 # Mesa-3.0, this list would be [ "Mesa", "3.0", "0" ].
1557
def pkgsplit(mypkg,silent=1):
	"""Split "name-version[-rev]" into [name, version, rev]; None when invalid.

	The revision defaults to "r0" when absent (so "Mesa-3.0" gives
	["Mesa","3.0","r0"]).  Also doubles as a validity check for versioned
	package names.  Diagnostics are printed only when silent is false.
	"""
	myparts=string.split(mypkg,'-')
	if len(myparts)<2:
		if not silent:
			print "!!! Name error in",mypkg+": missing a version or name part."
		return None
	for x in myparts:
		if len(x)==0:
			if not silent:
				print "!!! Name error in",mypkg+": empty \"-\" part."
			return None
	#case 1: explicit revision -- last part is "rN", second-to-last the version
	if revverify(myparts[-1]):
		if ververify(myparts[-2]):
			if len(myparts)==2:
				#only a version and a revision: no name part left
				return None
			else:
				for x in myparts[:-2]:
					if ververify(x):
						return None
					#names can't have versiony looking parts
				return [string.join(myparts[:-2],"-"),myparts[-2],myparts[-1]]
		else:
			return None

	#case 2: no revision -- last part must be the version
	elif ververify(myparts[-1],silent):
		if len(myparts)==1:
			if not silent:
				print "!!! Name error in",mypkg+": missing name part."
			return None
		else:
			for x in myparts[:-1]:
				if ververify(x):
					if not silent:
						print "!!! Name error in",mypkg+": multiple version parts."
					return None
			return [string.join(myparts[:-1],"-"),myparts[-1],"r0"]
	else:
		return None
1596
1597 def catpkgsplit(mycatpkg,silent=1):
1598 """returns [cat, pkgname, version, rev ]"""
1599 mysplit=string.split(mycatpkg,"/")
1600 if len(mysplit)!=2:
1601 if not silent:
1602 print "!!! Name error in",mycatpkg+": category or package part missing."
1603 return None
1604 mysplit2=pkgsplit(mysplit[1],silent)
1605 if mysplit2==None:
1606 return None
1607 return [mysplit[0],mysplit2[0],mysplit2[1],mysplit2[2]]
1608
1609 # vercmp:
1610 # This takes two version strings and returns an integer to tell you whether
1611 # the versions are the same, val1>val2 or val2>val1.
1612
def vercmp(val1,val2):
	"""Compare two version strings (no revision part).

	Returns 0 when equal, a positive number when val1 is newer, negative
	when val2 is newer.  Components are compared through relparse(), so
	_alpha/_beta style suffixes rank below the corresponding plain version.
	"""
	# consider 1_p2 vs 1.1:
	# after expansion it would become (1_p2,0) vs (1,1)
	# and 1_p2 would be compared with 1 before 0 is compared with 1.
	# to solve the bug we convert it to (1,0_p2) by splitting the _prepart
	# off and adding it back _after_ expansion.
	val1_prepart = val2_prepart = ''
	if '_' in val1:
		val1, val1_prepart = val1.split('_', 1)
	if '_' in val2:
		val2, val2_prepart = val2.split('_', 1)

	# replace '-' by '.'
	# FIXME: Is it needed? can val1/2 contain '-'?
	# (str methods replace the deprecated string.split() calls throughout)
	val1=val1.split('-')
	if len(val1)==2:
		val1[0]=val1[0]+"."+val1[1]
	val2=val2.split('-')
	if len(val2)==2:
		val2[0]=val2[0]+"."+val2[1]

	val1=val1[0].split('.')
	val2=val2[0].split('.')

	#add back decimal point so that .03 does not become "3" !
	for x in range(1,len(val1)):
		if val1[x][0] == '0' :
			val1[x]='.' + val1[x]
	for x in range(1,len(val2)):
		if val2[x][0] == '0' :
			val2[x]='.' + val2[x]

	# extend the shorter version with zero components so lengths match
	if len(val2)<len(val1):
		val2.extend(["0"]*(len(val1)-len(val2)))
	elif len(val1)<len(val2):
		val1.extend(["0"]*(len(val2)-len(val1)))

	# add back _prepart tails onto the final component
	if val1_prepart:
		val1[-1] += '_' + val1_prepart
	if val2_prepart:
		val2[-1] += '_' + val2_prepart
	#compare component-wise; the first nonzero relparse difference decides
	for x in range(0,len(val1)):
		cmp1=relparse(val1[x])
		cmp2=relparse(val2[x])
		for y in range(0,3):
			myret=cmp1[y]-cmp2[y]
			if myret != 0:
				return myret
	return 0
1666
1667
def pkgcmp(pkg1,pkg2):
	"""Compare two [name, version, rev] lists.

	Returns 1 when pkg1 is newer than pkg2, 0 when equal, -1 when older.
	(Fixes the original docstring, which compared pkg2 with itself.)"""
	mycmp=vercmp(pkg1[1],pkg2[1])
	if mycmp>0:
		return 1
	if mycmp<0:
		return -1
	#versions equal: fall back to the numeric part of the "rN" revision.
	#int() replaces the deprecated string.atoi().
	r1=int(pkg1[2][1:])
	r2=int(pkg2[2][1:])
	if r1>r2:
		return 1
	if r2>r1:
		return -1
	return 0
1682
def dep_parenreduce(mysplit,mypos=0):
	"Accepts a list of strings, and converts '(' and ')' surrounded items to sub-lists"
	#operates on mysplit in place (and also returns it)
	while mypos<len(mysplit):
		if mysplit[mypos]=="(":
			start=mypos
			mypos=mypos+1
			while mypos<len(mysplit):
				token=mysplit[mypos]
				if token==")":
					#collapse the whole parenthesized span into one sub-list
					mysplit[start:mypos+1]=[mysplit[start+1:mypos]]
					mypos=start
					break
				if token=="(":
					#nested group: recurse starting at the inner '('
					mysplit=dep_parenreduce(mysplit,mypos)
				mypos=mypos+1
		mypos=mypos+1
	return mysplit
1700
def dep_opconvert(mysplit,myuse):
	"""Does dependency operator conversion, such as moving '||' inside a sub-list, etc.

	Also resolves use-conditionals ("gnome? ( ... )") against the myuse
	flag list.  Returns the converted list, or None on a parse error
	(stray ')', or '||' not followed by a parenthesized list).
	"""
	mypos=0
	while mypos<len(mysplit):
		#'list' replaces py2's types.ListType (the very same object),
		#which no longer exists in Python 3
		if type(mysplit[mypos])==list:
			mysplit[mypos]=dep_opconvert(mysplit[mypos],myuse)
		elif mysplit[mypos]==")":
			#mismatched paren, error
			return None
		elif mysplit[mypos]=="||":
			if (mypos+1)<len(mysplit):
				if type(mysplit[mypos+1])!=list:
					# || must be followed by paren'd list
					return None
				else:
					#fold the "||" marker into the front of its group
					mynew=dep_opconvert(mysplit[mypos+1],myuse)
					mysplit[mypos+1]=mynew
					mysplit[mypos+1][0:0]=["||"]
					del mysplit[mypos]
			else:
				#don't end a depstring with || :)
				return None
		elif mysplit[mypos][-1]=="?":
			#uses clause, i.e "gnome? ( foo bar )"
			if (mysplit[mypos][:-1]) in myuse:
				#flag is set: just delete the conditional marker.
				#NOTE(review): the retained group shifts into this index and
				#is then stepped over by the increment below, so it is never
				#recursed at this level -- verify nested conditionals inside
				#a satisfied conditional are handled elsewhere.
				del mysplit[mypos]
			else:
				#flag unset: delete the conditional and the following item
				del mysplit[mypos]
				del mysplit[mypos]
				#we don't want to move to the next item, so we perform a quick hack
				mypos=mypos-1
		mypos=mypos+1
	return mysplit
1736
def dep_eval(deplist):
	"""Evaluate a reduced dependency list to 1 (satisfied) or 0 (not).

	A list starting with "||" is an OR-group: satisfied when any member
	evaluates to 1.  Any other list is an AND-group: unsatisfied when any
	member is 0 or 2.  An empty list counts as satisfied.  Nested lists
	are evaluated recursively.
	"""
	if len(deplist)==0:
		return 1
	#'list' replaces py2's types.ListType alias (removed in Python 3)
	if deplist[0]=="||":
		#or list; we just need one "1"
		for x in deplist[1:]:
			if type(x)==list:
				if dep_eval(x)==1:
					return 1
			elif x==1:
				return 1
		return 0
	else:
		#and list; values 0 and 2 both count as unsatisfied
		for x in deplist:
			if type(x)==list:
				if dep_eval(x)==0:
					return 0
			elif x==0 or x==2:
				return 0
		return 1
1757
def dep_zapdeps(unreduced,reduced):
	"""Takes an unreduced and reduced deplist and removes satisfied dependencies.
	Returned deplist contains steps that must be taken to satisfy dependencies.

	unreduced and reduced are parallel structures: reduced holds the 0/1
	satisfaction state for each atom in unreduced.  Returns None when the
	reduced list already evaluates as satisfied.
	"""
	if unreduced[0]=="||":
		if dep_eval(reduced):
			#deps satisfied, return None
			return None
		else:
			#an unsatisfied OR-group is returned whole so the caller can pick a branch
			return unreduced
	else:
		if dep_eval(reduced):
			#deps satisfied, return None
			return None
		else:
			returnme=[]
			x=0
			while x<len(reduced):
				#'list' replaces py2's types.ListType alias (removed in Python 3)
				if type(reduced[x])==list:
					myresult=dep_zapdeps(unreduced[x],reduced[x])
					if myresult:
						returnme.append(myresult)
				else:
					if reduced[x]==0:
						returnme.append(unreduced[x])
				x=x+1
			return returnme
1784
def dep_listcleanup(deplist):
	"""Remove unnecessary clutter from a deplist: unwraps single-element
	nesting levels and drops empty sub-lists.  Returns a new list."""
	newlist=[]
	#'list' replaces py2's types.ListType alias (removed in Python 3)
	if len(deplist)==1:
		#remove multiple-depth lists
		if type(deplist[0])==list:
			for x in deplist[0]:
				if type(x)==list:
					if len(x)!=0:
						newlist.append(dep_listcleanup(x))
				else:
					newlist.append(x)
		else:
			#unembed single nodes
			newlist.append(deplist[0])
	else:
		for x in deplist:
			if type(x)==list:
				if len(x)==1:
					newlist.append(x[0])
				elif len(x)!=0:
					newlist=newlist+dep_listcleanup(x)
			else:
				newlist.append(x)
	return newlist
1810
1811 # gets virtual package settings
1812
def getvirtuals(myroot):
	"""Read virtual->package mappings from the profile virtuals file and
	the edb virtuals cache, returning them as a dict (later files override
	earlier ones for the same key).

	NOTE(review): myroot is unused; paths come from the module globals
	'profiledir' and 'root' -- confirm against callers."""
	myvirts={}
	myvirtfiles=[]
	if profiledir:
		myvirtfiles=[profiledir+"/virtuals"]
	myvirtfiles.append(root+"/var/cache/edb/virtuals")
	for myvirtfn in myvirtfiles:
		if not os.path.exists(myvirtfn):
			continue
		myfile=open(myvirtfn)
		mylines=myfile.readlines()
		#bug fix: close the file instead of leaking the descriptor until GC
		myfile.close()
		for x in mylines:
			#whitespace split; str method replaces deprecated string.split()
			mysplit=x.split()
			if len(mysplit)<2:
				#invalid line
				continue
			myvirts[mysplit[0]]=mysplit[1]
	return myvirts
1831
1832 class packagetree:
1833 def __init__(self,virtual,clone=None):
1834 if clone:
1835 self.tree=clone.tree.copy()
1836 self.populated=clone.populated
1837 self.virtual=clone.virtual
1838 else:
1839 self.tree={}
1840 self.populated=0
1841 self.virtual=virtual
1842
1843 def load(self,mykey):
1844 "loads a cat/pkg from disk into the tree"
1845 #stub function for non-incremental caching:
1846 if not self.populated:
1847 self.populate()
1848 if not self.tree.has_key(mykey):
1849 self.tree[mykey]=[]
1850
1851 def populate(self):
1852 "populates the tree with values"
1853 populated=1
1854 pass
1855
1856 def zap(self,mycatpkg):
1857 "remove a catpkg from the deptree"
1858 cps=catpkgsplit(mycatpkg,0)
1859 mykey=cps[0]+"/"+cps[1]
1860 if not self.tree.has_key(mykey):
1861 #load cat/pkg'skeys from disk into tree
1862 self.load(mykey)
1863 x=0
1864 while x<len(self.tree[mykey]):
1865 if self.tree[mykey][x][0]==mycatpkg:
1866 del self.tree[mykey][x]
1867 x=x+1
1868 if len(self.tree[mykey])==0:
1869 self.tree[mykey]=[]
1870
1871 def inject(self,mycatpkg):
1872 "add a specific catpkg to the deptree"
1873 cps=catpkgsplit(mycatpkg,0)
1874 mykey=cps[0]+"/"+cps[1]
1875 if not self.tree.has_key(mykey):
1876 self.load(mykey)
1877 for x in self.tree[mykey]:
1878 if x[0]==mycatpkg:
1879 #already in the tree
1880 return
1881 self.tree[mykey].append([mycatpkg,cps])
1882 #new packages mean possible new auto-use settings, so regenerate USE vars
1883 settings.use_regenerate()
1884
1885 def resolve_key(self,mykey):
1886 "generates new key, taking into account virtual keys"
1887 if not self.tree.has_key(mykey):
1888 self.load(mykey)
1889 if self.tree.has_key(mykey) and len(self.tree[mykey])==0:
1890 #no packages correspond to the key
1891 if self.virtual:
1892 if self.virtual.has_key(mykey):
1893 self.load(self.virtual[mykey])
1894 return self.virtual[mykey]
1895 return mykey
1896
1897 def exists_specific(self,myspec):
1898 myspec=self.resolve_specific(myspec)
1899 if not myspec:
1900 return None
1901 cps=catpkgsplit(myspec)
1902 if not cps:
1903 return None
1904 mykey=cps[0]+"/"+cps[1]
1905 if not self.tree.has_key(mykey):
1906 self.load(mykey)
1907 for x in self.tree[mykey]:
1908 if x[0]==myspec:
1909 return 1
1910 return 0
1911
1912 def exists_specific_cat(self,myspec):
1913 "give me a specific package, and I'll tell you whether the specific node exists."
1914 myspec=self.resolve_specific(myspec)
1915 if not myspec:
1916 return None
1917 cps=catpkgsplit(myspec)
1918 if not cps:
1919 return None
1920 mykey=cps[0]+"/"+cps[1]
1921 if not self.tree.has_key(mykey):
1922 self.load(mykey)
1923 if len(self.tree[mykey]):
1924 return 1
1925 return 0
1926
1927 def resolve_specific(self,myspec):
1928 cps=catpkgsplit(myspec)
1929 if not cps:
1930 return None
1931 mykey=self.resolve_key(cps[0]+"/"+cps[1])
1932 mykey=mykey+"-"+cps[2]
1933 if cps[3]!="r0":
1934 mykey=mykey+"-"+cps[3]
1935 return mykey
1936
1937 def hasnode(self,mykey):
1938 """Does the particular node (cat/pkg key) exist?"""
1939 myreskey=self.resolve_key(mykey)
1940 if self.tree.has_key(myreskey):
1941 if len(self.tree[myreskey]):
1942 return 1
1943 return 0
1944
1945 def getallnodes(self):
1946 "returns a list of all keys in our tree"
1947 if not self.populated:
1948 self.populate()
1949 mykeys=[]
1950 for x in self.tree.keys():
1951 if len(self.tree[x]):
1952 mykeys.append(x)
1953 return mykeys
1954
1955 def getnode(self,nodename):
1956 nodename=self.resolve_key(nodename)
1957 if not nodename:
1958 return []
1959 if not self.tree.has_key(nodename):
1960 self.load(nodename)
1961 return self.tree[nodename]
1962
	def depcheck(self,depstring,lookatuse=1):
		"""evaluates a dependency string and returns a 2-node result list
		[1, None] = ok, no dependencies
		[1, ["x11-base/foobar","sys-apps/oni"] = dependencies must be satisfied
		[0, * ] = parse error
		"""
		if lookatuse:
			myusesplit=string.split(settings["USE"])
		else:
			#we are being run by autouse(), don't consult USE vars yet.
			myusesplit=[]
		mysplit=string.split(depstring)
		#convert parenthesis to sublists
		mysplit=dep_parenreduce(mysplit)
		#mysplit can't be None here, so we don't need to check
		mysplit=dep_opconvert(mysplit,myusesplit)
		#if mysplit==None, then we have a parse error (paren mismatch or misplaced ||)
		#up until here, we haven't needed to look at the database tree

		if mysplit==None:
			return [0,"Parse Error (parenthesis mismatch?)"]
		elif mysplit==[]:
			#dependencies were reduced to nothing
			return [1,[]]
		mysplit2=mysplit[:]
		#replace each atom with its satisfied (1) / unsatisfied (0) state
		mysplit2=self.dep_wordreduce(mysplit2)
		if mysplit2==None:
			return [0,"Invalid token"]
		myeval=dep_eval(mysplit2)
		if myeval:
			return [1,[]]
		else:
			#collect the unsatisfied atoms, de-duplicating via dict keys
			mylist=dep_listcleanup(dep_zapdeps(mysplit,mysplit2))
			mydict={}
			for x in mylist:
				mydict[x]=1
			return [1,mydict.keys()]
2000
2001 def dep_wordreduce(self,mydeplist):
2002 """Calls dep_depreduce() on all the items in the deplist"""
2003 mypos=0
2004 deplist=mydeplist[:]
2005 while mypos<len(deplist):
2006 if type(deplist[mypos])==types.ListType:
2007 #recurse
2008 deplist[mypos]=self.dep_wordreduce(deplist[mypos])
2009 else:
2010 if deplist[mypos]=="||":
2011 pass
2012 else:
2013 mydep=self.dep_depreduce(deplist[mypos])
2014 if mydep!=None:
2015 deplist[mypos]=mydep
2016 else:
2017 #encountered invalid string
2018 return None
2019 mypos=mypos+1
2020 return deplist
2021
	def dep_depreduce(self,mypkgdep):
		"""Reduce a single dependency atom to its satisfaction state.

		Returns 1 (satisfied), 0 (unsatisfied) or None (malformed atom).
		Handles the prefixes !, =, =...* (range glob), >, >=, <, <=, ~
		(same version, any revision) and plain cat/pkg atoms.
		"""
		if mypkgdep[0]=="!":
			# !cat/pkg-v
			#catch "! " errors
			if not mypkgdep[1:]:
				return None
			#a blocker is satisfied when the blocked package is absent
			mybestmatch=self.dep_bestmatch(mypkgdep[1:])
			if mybestmatch:
				return 0
			else:
				return 1
		elif mypkgdep[0]=="=":
			# =cat/pkg-v
			if mypkgdep[-1]=="*":
				#"=cat/pkg-v*": match any version in [v, v-with-last-part+1)
				if not mypkgdep[1:-1]:
					return None
				if not isspecific(mypkgdep[1:-1]):
					return None
				mycatpkg=catpkgsplit(mypkgdep[1:-1])
				try:
					mynewver=mycatpkg[2]
					mynewsplit=string.split(mycatpkg[2],'.')
					mynewsplit[-1]=`int(mynewsplit[-1])+1`
				except:
					return None
				cmp1=mycatpkg[1:]
				cmp2=[mycatpkg[1],string.join(mynewsplit,"."),"r0"]
				for x in self.getnode(mycatpkg[0]+"/"+mycatpkg[1]):
					if (pkgcmp(x[1][1:],cmp1)>=0) and (pkgcmp(x[1][1:],cmp2)<0):
						return 1
			else:
				if not mypkgdep[1:]:
					return None
				return self.exists_specific(mypkgdep[1:])
		elif (mypkgdep[0]=="<") or (mypkgdep[0]==">"):
			# >=cat/pkg-v or <=,>,<
			if mypkgdep[1]=="=":
				cmpstr=mypkgdep[0:2]
				cpv=mypkgdep[2:]
			else:
				cmpstr=mypkgdep[0]
				cpv=mypkgdep[1:]
			if not isspecific(cpv):
				return None
			mycatpkg=catpkgsplit(cpv,0)
			if not mycatpkg:
				#parse error
				return 0
			mykey=mycatpkg[0]+"/"+mycatpkg[1]
			if self.hasnode(mykey):
				for x in self.getnode(mykey):
					#NOTE(review): builds and eval()s "pkgcmp(...)<cmpstr>0";
					#cmpstr can only be one of <,>,<=,>= parsed above, but a
					#comparison dispatch table would avoid eval entirely.
					if eval("pkgcmp(x[1][1:],mycatpkg[1:])"+cmpstr+"0"):
						return 1
			return 0
		elif mypkgdep[0]=="~":
			#"~cat/pkg-v": same version with any revision >= the given one
			if not mypkgdep[1:]:
				return None
			if not isspecific(mypkgdep[1:]):
				return None
			cp=catpkgsplit(mypkgdep[1:])
			if not cp:
				return 0
			mykey=cp[0]+"/"+cp[1]
			if self.hasnode(mykey):
				for x in self.getnode(mykey):
					if pkgcmp(x[1][1:],cp[1:])>=0 and (x[1][2]==cp[2]):
						return 1
			return 0
		if not isspecific(mypkgdep):
			# cat/pkg
			if self.hasnode(mypkgdep):
				return 1
			else:
				return 0
		else:
			return None
2098
2099 def dep_pkgcat(self,mypkgdep):
2100 """tries to find the category of a package dependency that has been provided without
2101 a category, if it couldn't be found the initial argument in returned"""
2102 # check if a slash has been provided to
2103 # seperate the category from the application
2104 # if not, seperate the deps chars and try
2105 # to find a matching category
2106 if not '/' in mypkgdep:
2107 re_deps=re.compile("^([><=~]*)(.+)$")
2108 mypkgdep_parts=re_deps.findall(mypkgdep)
2109 # set default values
2110 mypkgdep_deps=""
2111 mypkgdep_package=mypkgdep
2112 mypkgdep_packagename=mypkgdep
2113 # try to get the deps chars and package name isolated
2114 if mypkgdep_parts:
2115 mypkgdep_deps=mypkgdep_parts[0][0]
2116 mypkgdep_package=mypkgdep_parts[0][1]
2117 mypkgdep_package_parts=pkgsplit(mypkgdep_package)
2118 if mypkgdep_package_parts:
2119 mypkgdep_packagename=mypkgdep_package_parts[0]
2120 # try to contructs a full packagename with category
2121 mypkgdep_withcat = ""
2122 for cat in categories:
2123 if self.hasnode(cat+"/"+mypkgdep_packagename):
2124 mypkgdep_withcat = mypkgdep_deps+cat+"/"+mypkgdep_package
2125 break
2126 # if it succeeded, assign it as a result
2127 if mypkgdep_withcat:
2128 mypkgdep = mypkgdep_withcat
2129 return mypkgdep
2130
	def dep_bestmatch(self,mypkgdep):
		"""
		returns best match for mypkgdep in the tree. Accepts
		a single depstring, such as ">foo/bar-1.0" and finds
		the most recent version of foo/bar that satisfies the
		dependency and returns it, i.e: "foo/bar-1.3". Works
		for >,<,>=,<=,=,and general deps. Don't call with a !
		dep, since there is no good match for a ! dep.

		Returns "" when nothing matches.  NOTE(review): a *specific* dep
		with no leading operator falls off the end of the elif chain and
		returns None implicitly -- callers should treat that like "".
		"""
		#resolve a category-less dep (e.g. "foo-1.0") to "cat/foo-1.0" first
		mypkgdep=self.dep_pkgcat(mypkgdep)

		if (mypkgdep[0]=="="):
			if mypkgdep[-1]=="*":
				#"=cat/pkg-v*" glob: accept versions in [v, v+1)
				if not isspecific(mypkgdep[1:-1]):
					return ""
				mycatpkg=catpkgsplit(mypkgdep[1:-1])
				try:
					mynewver=mycatpkg[2]
					mynewsplit=string.split(mycatpkg[2],'.')
					#increment the last version component to form the exclusive upper bound
					mynewsplit[-1]=`int(mynewsplit[-1])+1`
				except:
					return ""
				mynodes=[]
				cmp1=mycatpkg[1:]
				cmp2=[mycatpkg[1],string.join(mynewsplit,"."),"r0"]
				for x in self.getnode(mycatpkg[0]+"/"+mycatpkg[1]):
					if (pkgcmp(x[1][1:],cmp1)>=0) and (pkgcmp(x[1][1:],cmp2)<0):
						mynodes.append(x)
				if len(mynodes)==0:
					return ""
				#pick the highest version among the qualifying nodes
				bestmatch=mynodes[0]
				for x in mynodes[1:]:
					if pkgcmp(x[1][1:],bestmatch[1][1:])>0:
						bestmatch=x
				return bestmatch[0]
			else:
				#exact "=cat/pkg-v": it either exists or it doesn't
				if self.exists_specific(mypkgdep[1:]):
					return mypkgdep[1:]
				else:
					return ""
		elif (mypkgdep[0]==">") or (mypkgdep[0]=="<"):
			#split the operator (>,<,>=,<=) from the cat/pkg-v part
			if mypkgdep[1]=="=":
				cmpstr=mypkgdep[0:2]
				cpv=mypkgdep[2:]
			else:
				cmpstr=mypkgdep[0]
				cpv=mypkgdep[1:]
			if not isspecific(cpv):
				return ""
			mycatpkg=catpkgsplit(cpv)
			if not mycatpkg:
				return ""
			mykey=mycatpkg[0]+"/"+mycatpkg[1]
			if not self.hasnode(mykey):
				return ""
			mynodes=[]
			for x in self.getnode(mykey):
				#build "pkgcmp(...)<op>0" and eval it to apply the operator
				if eval("pkgcmp(x[1][1:],mycatpkg[1:])"+cmpstr+"0"):
					mynodes.append(x)
			#now we have a list of all nodes that qualify
			if len(mynodes)==0:
				return ""
			#pick the highest version among them
			bestmatch=mynodes[0]
			for x in mynodes[1:]:
				if pkgcmp(x[1][1:],bestmatch[1][1:])>0:
					bestmatch=x
			return bestmatch[0]
		elif (mypkgdep[0]=="~"):
			#"~cat/pkg-v": highest revision of exactly version v
			mypkg=mypkgdep[1:]
			if not isspecific(mypkg):
				return ""
			mycp=catpkgsplit(mypkg)
			if not mycp:
				return ""
			mykey=mycp[0]+"/"+mycp[1]
			if not self.hasnode(mykey):
				return ""
			myrev=-1
			for x in self.getnode(mykey):
				if mycp[2]!=x[1][2]:
					continue
				#track the highest revision ("rN" -> N) seen for this version
				if string.atoi(x[1][3][1:])>myrev:
					myrev=string.atoi(x[1][3][1:])
					mymatch=x[0]
			if myrev==-1:
				return ""
			else:
				return mymatch
		elif not isspecific(mypkgdep):
			#bare cat/pkg: best match is simply the highest version in the node
			if not self.hasnode(mypkgdep):
				return ""
			mynodes=self.getnode(mypkgdep)[:]
			if len(mynodes)==0:
				return ""
			bestmatch=mynodes[0]
			for x in mynodes[1:]:
				if pkgcmp(x[1][1:],bestmatch[1][1:])>0:
					bestmatch=x
			return bestmatch[0]
2230
	def dep_nomatch(self,mypkgdep):
		"""dep_nomatch() has a very specific purpose. You pass it a dep, like =sys-apps/foo-1.0.
		Then, it scans the sys-apps/foo category and returns a list of sys-apps/foo packages that
		*don't* match. This method is used to clean the portagetree using entries in the
		make.profile/packages and profiles/package.mask files.
		It is only intended to process specific deps, but should be robust enough to pass any type
		of string to it and have it not die.  Always returns a list ([] when
		nothing can be excluded or the dep is unusable)."""
		#resolve a category-less dep to cat/pkg form first
		mypkgdep=self.dep_pkgcat(mypkgdep)

		returnme=[]
		if (mypkgdep[0]=="="):
			if mypkgdep[-1]=="*":
				#"=cat/pkg-v*" glob: collect everything OUTSIDE [v, v+1)
				if not isspecific(mypkgdep[1:-1]):
					return []
				mycatpkg=catpkgsplit(mypkgdep[1:-1])
				try:
					mynewver=mycatpkg[2]
					mynewsplit=string.split(mycatpkg[2],'.')
					#increment the last version component to form the upper bound
					mynewsplit[-1]=`int(mynewsplit[-1])+1`
				except:
					return []
				mynodes=[]
				cmp1=mycatpkg[1:]
				cmp2=[mycatpkg[1],string.join(mynewsplit,"."),"r0"]
				for x in self.getnode(mycatpkg[0]+"/"+mycatpkg[1]):
					if not ((pkgcmp(x[1][1:],cmp1)>=0) and (pkgcmp(x[1][1:],cmp2)<0)):
						mynodes.append(x[0])
				return mynodes
			else:
				#exact "=cat/pkg-v": everything in the node except that version
				mycp=catpkgsplit(mypkgdep[1:],1)
				if not mycp:
					#not a specific pkg, or parse error. keep silent
					return []
				mykey=mycp[0]+"/"+mycp[1]
				if not self.hasnode(mykey):
					return []
				x=0
				while x<len(self.tree[mykey]):
					if self.tree[mykey][x][0]!=mypkgdep[1:]:
						returnme.append(self.tree[mykey][x][0])
					x=x+1
		elif (mypkgdep[0]==">") or (mypkgdep[0]=="<"):
			#split the operator (>,<,>=,<=) from the cat/pkg-v part
			if mypkgdep[1]=="=":
				cmpstr=mypkgdep[0:2]
				cpv=mypkgdep[2:]
			else:
				cmpstr=mypkgdep[0]
				cpv=mypkgdep[1:]
			if not isspecific(cpv):
				return []
			mycatpkg=catpkgsplit(cpv,1)
			if mycatpkg==None:
				#parse error
				return []
			mykey=mycatpkg[0]+"/"+mycatpkg[1]
			if not self.hasnode(mykey):
				return []
			for x in self.getnode(mykey):
				#collect nodes for which the eval'd comparison does NOT hold
				if not eval("pkgcmp(x[1][1:],mycatpkg[1:])"+cmpstr+"0"):
					returnme.append(x[0])
		elif mypkgdep[0]=="~":
			#"~" implies a "bestmatch"
			mycp=catpkgsplit(mypkgdep[1:],1)
			if not mycp:
				return []
			mykey=mycp[0]+"/"+mycp[1]
			if not self.hasnode(mykey):
				return []
			mymatch=self.dep_bestmatch(mypkgdep)
			if not mymatch:
				#nothing matches, so every entry in the node is a non-match
				for x in self.tree[mykey]:
					returnme.append(x[0])
			else:
				#everything except the single best match is a non-match
				x=0
				while x<len(self.tree[mykey]):
					if self.tree[mykey][x][0]!=mymatch:
						returnme.append(self.tree[mykey][x][0])
					x=x+1
			#end of ~ section
		else:
			return []
		return returnme
2313
	def dep_match(self,mypkgdep):
		"""
		returns a list of all matches for mypkgdep in the tree. Accepts
		a single depstring, such as ">foo/bar-1.0" and finds
		all the versions of foo/bar that satisfy the
		dependency and returns them, i.e: ["foo/bar-1.3"]. Works
		for >,<,>=,<=,=,and general deps. Don't call with a !
		dep, since there is no good match for a ! dep.

		NOTE(review): a specific dep with no leading operator falls off
		the end of the elif chain and returns None implicitly rather than
		[] -- callers should treat both as "no matches".
		"""
		#resolve a category-less dep to cat/pkg form first
		mypkgdep=self.dep_pkgcat(mypkgdep)

		if (mypkgdep[0]=="="):
			if mypkgdep[-1]=="*":
				#"=cat/pkg-v*" glob: all versions in [v, v+1)
				if not isspecific(mypkgdep[1:-1]):
					return []
				mycatpkg=catpkgsplit(mypkgdep[1:-1])
				try:
					mynewver=mycatpkg[2]
					mynewsplit=string.split(mycatpkg[2],'.')
					#increment the last version component to form the upper bound
					mynewsplit[-1]=`int(mynewsplit[-1])+1`
				except:
					return []
				mynodes=[]
				cmp1=mycatpkg[1:]
				cmp2=[mycatpkg[1],string.join(mynewsplit,"."),"r0"]
				for x in self.getnode(mycatpkg[0]+"/"+mycatpkg[1]):
					if ((pkgcmp(x[1][1:],cmp1)>=0) and (pkgcmp(x[1][1:],cmp2)<0)):
						mynodes.append(x[0])
				return mynodes
			elif self.exists_specific(mypkgdep[1:]):
				#exact "=cat/pkg-v" that exists: a single match
				return [mypkgdep[1:]]
			else:
				return []
		elif (mypkgdep[0]==">") or (mypkgdep[0]=="<"):
			#split the operator (>,<,>=,<=) from the cat/pkg-v part
			if mypkgdep[1]=="=":
				cmpstr=mypkgdep[0:2]
				cpv=mypkgdep[2:]
			else:
				cmpstr=mypkgdep[0]
				cpv=mypkgdep[1:]
			if not isspecific(cpv):
				return []
			mycatpkg=catpkgsplit(cpv,0)
			if mycatpkg==None:
				#parse error
				return []
			mykey=mycatpkg[0]+"/"+mycatpkg[1]
			if not self.hasnode(mykey):
				return []
			mynodes=[]
			for x in self.getnode(mykey):
				#build "pkgcmp(...)<op>0" and eval it to apply the operator
				if eval("pkgcmp(x[1][1:],mycatpkg[1:])"+cmpstr+"0"):
					mynodes.append(x[0])
			#now we have a list of all nodes that qualify
			#since we want all nodes that match, return this list
			return mynodes
		elif mypkgdep[0]=="~":
			#"~" implies a "bestmatch"
			return [self.dep_bestmatch(mypkgdep)]
		elif not isspecific(mypkgdep):
			#bare cat/pkg: every version in the node matches
			if not self.hasnode(mypkgdep):
				return []
			mynodes=[]
			for x in self.getnode(mypkgdep)[:]:
				mynodes.append(x[0])
			return mynodes
2380
2381 class vartree(packagetree):
2382 "this tree will scan a var/db/pkg database located at root (passed to init)"
2383 def __init__(self,root="/",virtual=None,clone=None):
2384 if clone:
2385 self.root=clone.root
2386 self.gotcat=copy.deepcopy(clone.gotcat)
2387 else:
2388 self.root=root
2389 self.gotcat={}
2390 packagetree.__init__(self,virtual,clone)
2391 def getebuildpath(self,fullpackage):
2392 cat,package=fullpackage.split("/")
2393 return self.root+"var/db/pkg/"+fullpackage+"/"+package+".ebuild"
2394
2395 def load(self,mykey):
2396 if '/' in mykey:
2397 mycat,mypkg=string.split(mykey,"/")
2398 else:
2399 return []
2400 if not self.tree.has_key(mykey):
2401 self.tree[mykey]=[]
2402 #This next line allows us to set root to None and disable loading (for "emptytrees")
2403 if not self.root:
2404 return
2405 if self.gotcat.has_key(mycat):
2406 return
2407 if not os.path.isdir(self.root+"/var/db/pkg/"+mycat):
2408 return
2409 for x in os.listdir(self.root+"/var/db/pkg/"+mycat):
2410 if x[0:len(mypkg)]!=mypkg:
2411 #skip, since we're definitely not interested if the package name doesn't match.
2412 #note that this isn't a perfect test, but will weed out 99% of the packages we aren't interested in loading.
2413 continue
2414 if isjustname(x):
2415 fullpkg=mycat+"/"+x+"-1.0"
2416 else:
2417 fullpkg=mycat+"/"+x
2418 mysplit=catpkgsplit(fullpkg,0)
2419 if mysplit==None:
2420 print "!!! Error:",self.root+"/var/db/pkg/"+mycat+"/"+x,"is not a valid database entry, skipping..."
2421 continue
2422 mynewkey=mycat+"/"+mysplit[1]
2423 if not self.tree.has_key(mynewkey):
2424 self.tree[mynewkey]=[]
2425 for y in self.tree[mynewkey]:
2426 if y[0]==fullpkg:
2427 #we've already got it, skip.
2428 continue
2429 self.tree[mynewkey].append([fullpkg,mysplit])
2430
2431 def getslot(self,mycatpkg):
2432 """Get a slot for a catpkg; assume it exists."""
2433 if not os.path.exists(self.root+"var/db/pkg/"+mycatpkg+"/SLOT"):
2434 return ""
2435 myslotfile=open(self.root+"var/db/pkg/"+mycatpkg+"/SLOT","r")
2436 myslotvar=string.split(myslotfile.readline())
2437 myslotfile.close()
2438 if len(myslotvar):
2439 return myslotvar[0]
2440 else:
2441 return ""
2442
2443 def gettimeval(self,mycatpkg):
2444 """Get an integer time value that can be used to compare against other catpkgs; the timeval will try to use
2445 COUNTER but will also take into account the start time of Portage and use mtimes of CONTENTS files if COUNTER
2446 doesn't exist. The algorithm makes it safe to compare the timeval values of COUNTER-enabled and non-COUNTER
2447 db entries. Assumes mycatpkg exists."""
2448 global starttime
2449 rootp=self.root+"var/db/pkg/"+mycatpkg
2450 if not os.path.exists(rootp+"/COUNTER"):
2451 if not os.path.exists(rootp+"/CONTENTS"):
2452 return 0
2453 else:
2454 return os.stat(rootp+"/CONTENTS")[ST_MTIME]
2455 else:
2456 mycounterfile=open(rootp+"/COUNTER","r")
2457 mycountervar=string.atoi(string.split(mycounterfile.readline())[0])
2458 mycounterfile.close()
2459 return starttime+mycountervar
2460
2461 def populate(self):
2462 "populates the local tree (/var/db/pkg)"
2463 prevmask=os.umask(0)
2464 if not os.path.isdir(self.root+"var"):
2465 os.mkdir(self.root+"var",0755)
2466 if not os.path.isdir(self.root+"var/db"):
2467 os.mkdir(self.root+"var/db",0755)
2468 if not os.path.isdir(self.root+"var/db/pkg"):
2469 os.mkdir(self.root+"var/db/pkg",0755)
2470 os.umask(prevmask)
2471 dbdir=self.root+"var/db/pkg"
2472 origdir=getmycwd()
2473 os.chdir(dbdir)
2474 mywd=os.getcwd()
2475 for x in os.listdir(mywd):
2476 if not os.path.isdir(mywd+"/"+x):
2477 continue
2478 for y in os.listdir(mywd+"/"+x):
2479 if isjustname(y):
2480 fullpkg=x+"/"+y+"-1.0"
2481 else:
2482 fullpkg=x+"/"+y
2483 mysplit=catpkgsplit(fullpkg,0)
2484 if mysplit==None:
2485 print "!!! Error:",self.root+"var/db/pkg/"+x+"/"+y,"is not a valid database entry, skipping..."
2486 continue
2487 mykey=x+"/"+mysplit[1]
2488 if not self.tree.has_key(mykey):
2489 self.tree[mykey]=[]
2490 self.tree[mykey].append([fullpkg,mysplit])
2491 os.chdir(origdir)
2492 self.populated=1
2493
2494 class portagetree(packagetree):
2495 "this tree will scan a portage directory located at root (passed to init)"
2496 def __init__(self,root="/",virtual=None,clone=None):
2497 if clone:
2498 self.root=clone.root
2499 self.portroot=clone.portroot
2500 self.pkgmaskdict=clone.pkgmaskdict
2501 self.pkglines=clone.pkglines
2502 else:
2503 self.root=root
2504 self.portroot=settings["PORTDIR"]
2505 self.pkgmaskdict={}
2506 self.pkgmasklines=grabfile(self.portroot+"/profiles/package.mask")
2507 self.pkglines=[]
2508 #remove '*'s from beginnning of deps
2509 if profiledir:
2510 for x in grabfile(profiledir+"/packages"):
2511 if x[0]=="*":
2512 self.pkglines.append(x[1:])
2513 else:
2514 self.pkglines.append(x)
2515 packagetree.__init__(self,virtual)
2516
2517 def populate(self):
2518 "populates the port tree"
2519 origdir=getmycwd()
2520 os.chdir(self.portroot)
2521 for x in categories:
2522 if not os.path.isdir(os.getcwd()+"/"+x):
2523 continue
2524 for y in os.listdir(os.getcwd()+"/"+x):
2525 if not os.path.isdir(os.getcwd()+"/"+x+"/"+y):
2526 continue
2527 if y=="CVS":
2528 continue
2529 for mypkg in os.listdir(os.getcwd()+"/"+x+"/"+y):
2530 if mypkg[-7:] != ".ebuild":
2531 continue
2532 mypkg=mypkg[:-7]
2533 mykey=x+"/"+y
2534 fullpkg=x+"/"+mypkg
2535 if not self.tree.has_key(mykey):
2536 self.tree[mykey]=[]
2537 mysplit=catpkgsplit(fullpkg,0)
2538 if mysplit==None:
2539 print "!!! Error:",self.portroot+"/"+x+"/"+y,"is not a valid Portage directory, skipping..."
2540 continue
2541 self.tree[mykey].append([fullpkg,mysplit])
2542 #self.populated must be set here, otherwise dep_match will cause recursive populate() calls
2543 self.populated=1
2544 self.domask()
2545
2546 def domask(self):
2547 "mask out appropriate entries in our database. We call this whenever we add to the db."
2548 for x in self.pkgmasklines:
2549 matches=self.dep_match(x)
2550 if matches:
2551 for y in matches:
2552 self.zap(y)
2553 for x in self.pkglines:
2554 matches=self.dep_nomatch(x)
2555 for y in matches:
2556 self.zap(y)
2557
2558 def getdeps(self,pf):
2559 "returns list of dependencies, if any"
2560 if self.exists_specific(pf):
2561 mysplit=catpkgsplit(pf)
2562 if mysplit==None:
2563 #parse error
2564 return ""
2565 mydepfile=self.portroot+"/"+mysplit[0]+"/"+mysplit[1]+"/files/depend-"+string.split(pf,"/")[1]
2566 if os.path.exists(mydepfile):
2567 myd=open(mydepfile,"r")
2568 mydeps=myd.readlines()
2569 myd.close()
2570 returnme=""
2571 for x in mydeps:
2572 returnme=returnme+" "+x[:-1]
2573 return returnme
2574 return ""
2575
2576 def getname(self,pkgname):
2577 "returns file location for this particular package"
2578 pkgname=self.resolve_specific(pkgname)
2579 if not pkgname:
2580 return ""
2581 mysplit=string.split(pkgname,"/")
2582 psplit=pkgsplit(mysplit[1])
2583 return self.portroot+"/"+mysplit[0]+"/"+psplit[0]+"/"+mysplit[1]+".ebuild"
2584
2585 class binarytree(packagetree):
2586 "this tree scans for a list of all packages available in PKGDIR"
2587 def __init__(self,root="/",virtual=None,clone=None):
2588 if clone:
2589 self.root=clone.root
2590 self.pkgdir=clone.pkgdir
2591 else:
2592 self.root=root
2593 self.pkgdir=settings["PKGDIR"]
2594 packagetree.__init__(self,virtual)
2595 def populate(self):
2596 "popules the binarytree"
2597 if (not os.path.isdir(self.pkgdir)):
2598 return 0
2599 for mypkg in os.listdir(self.pkgdir+"/All"):
2600 if mypkg[-5:]!=".tbz2":
2601 continue
2602 mytbz2=xpak.tbz2(self.pkgdir+"/All/"+mypkg)
2603 mycat=mytbz2.getfile("CATEGORY")
2604 if not mycat:
2605 #old-style or corrupt package
2606 continue
2607 mycat=string.strip(mycat)
2608 fullpkg=mycat+"/"+mypkg[:-5]
2609 cps=catpkgsplit(fullpkg,0)
2610 if cps==None:
2611 print "!!! Error:",mytbz2,"contains corrupt cat/pkg information, skipping..."
2612 continue
2613 mykey=mycat+"/"+cps[1]
2614 if not self.tree.has_key(mykey):
2615 self.tree[mykey]=[]
2616 self.tree[mykey].append([fullpkg,cps])
2617 self.populated=1
2618
2619 def getname(self,pkgname):
2620 "returns file location for this particular package"
2621 mysplit=string.split(pkgname,"/")
2622 if len(mysplit)==1:
2623 return self.pkgdir+"/All/"+self.resolve_specific(pkgname)+".tbz2"
2624 else:
2625 return self.pkgdir+"/All/"+mysplit[1]+".tbz2"
2626
2627 class dblink:
2628 "this class provides an interface to the standard text package database"
2629 def __init__(self,cat,pkg,myroot):
2630 "create a dblink object for cat/pkg. This dblink entry may or may not exist"
2631 self.cat=cat
2632 self.pkg=pkg
2633 self.dbdir=myroot+"/var/db/pkg/"+cat+"/"+pkg
2634 self.myroot=myroot
2635
2636 def getpath(self):
2637 "return path to location of db information (for >>> informational display)"
2638 return self.dbdir
2639
2640 def exists(self):
2641 "does the db entry exist? boolean."
2642 return os.path.exists(self.dbdir)
2643
2644 def create(self):
2645 "create the skeleton db directory structure. No contents, virtuals, provides or anything. Also will create /var/db/pkg if necessary."
2646 if not os.path.exists(self.dbdir):
2647 os.makedirs(self.dbdir)
2648
2649 def delete(self):
2650 "erase this db entry completely"
2651 if not os.path.exists(self.dbdir):
2652 return
2653 for x in os.listdir(self.dbdir):
2654 os.unlink(self.dbdir+"/"+x)
2655 os.rmdir(self.dbdir)
2656
2657 def clearcontents(self):
2658 if os.path.exists(self.dbdir+"/CONTENTS"):
2659 os.unlink(self.dbdir+"/CONTENTS")
2660
	def getcontents(self):
		"""Parse this entry's CONTENTS file into a dict keyed by path (the
		recorded path re-rooted under the global ROOT).  Values are lists
		whose first element is the entry type:
		  obj          -> [type, mtime, md5sum]
		  sym          -> [type, mtime, link destination]
		  dir/dev/fif  -> [type]
		Returns None when CONTENTS is missing or any line is malformed.
		"""
		if not os.path.exists(self.dbdir+"/CONTENTS"):
			return None
		pkgfiles={}
		myc=open(self.dbdir+"/CONTENTS","r")
		mylines=myc.readlines()
		myc.close()
		for line in mylines:
			mydat=string.split(line)
			# we do this so we can remove from non-root filesystems
			# (use the ROOT var to allow maintenance on other partitions)
			mydat[1]=os.path.normpath(root+mydat[1][1:])
			if mydat[0]=="obj":
				#format: type, mtime, md5sum
				pkgfiles[string.join(mydat[1:-2]," ")]=[mydat[0], mydat[-1], mydat[-2]]
			elif mydat[0]=="dir":
				#format: type
				pkgfiles[string.join(mydat[1:])]=[mydat[0] ]
			elif mydat[0]=="sym":
				#format: type, mtime, dest
				#the link path may itself contain spaces, so scan backwards
				#for the "->" token separating link name from link target
				x=len(mydat)-1
				splitter=-1
				while(x>=0):
					if mydat[x]=="->":
						splitter=x
						break
					x=x-1
				if splitter==-1:
					return None
				pkgfiles[string.join(mydat[1:splitter]," ")]=[mydat[0], mydat[-1], string.join(mydat[(splitter+1):-1]," ")]
			elif mydat[0]=="dev":
				#format: type
				pkgfiles[string.join(mydat[1:]," ")]=[mydat[0] ]
			elif mydat[0]=="fif":
				#format: type
				pkgfiles[string.join(mydat[1:]," ")]=[mydat[0]]
			else:
				#unknown entry type: treat the whole CONTENTS file as corrupt
				return None
		return pkgfiles
2700
	def unmerge(self,pkgfiles=None):
		"""Remove this package's files from the live filesystem.

		pkgfiles: a parsed CONTENTS dict (as returned by getcontents());
		when None it is loaded from this db entry.  Runs pkg_prerm first
		and pkg_postrm last (sys.exit on script failure); files whose
		recorded mtime/md5 no longer match are left alone, as are paths
		under CONFIG_PROTECT (unless also under CONFIG_PROTECT_MASK).
		Finally prunes this package from the vartree and drops stale
		entries from the virtuals cache.
		"""
		if not pkgfiles:
			pkgfiles=self.getcontents()
			if not pkgfiles:
				return
		myebuildpath=self.dbdir+"/"+self.pkg+".ebuild"
		if not os.path.exists(myebuildpath):
			myebuildpath=None
		#do prerm script
		if myebuildpath:
			a=doebuild(myebuildpath,"prerm",self.myroot)
			if a:
				print "!!! pkg_prerm() script failed; exiting."
				sys.exit(a)

		#we do this so we don't unmerge the ebuild file by mistake
		myebuildfile=os.path.normpath(self.dbdir+"/"+self.pkg+".ebuild")
		if os.path.exists(myebuildfile):
			if pkgfiles.has_key(myebuildfile):
				del pkgfiles[myebuildfile]

		#reverse-sorted so files are removed before their parent directories
		mykeys=pkgfiles.keys()
		mykeys.sort()
		mykeys.reverse()

		#do some config file management prep
		self.protect=[]
		for x in string.split(settings["CONFIG_PROTECT"]):
			ppath=os.path.normpath(self.myroot+"/"+x)+"/"
			if os.path.isdir(ppath):
				self.protect.append(ppath)
				print ">>> Config file management enabled for",ppath

		self.protectmask=[]
		for x in string.split(settings["CONFIG_PROTECT_MASK"]):
			ppath=os.path.normpath(self.myroot+"/"+x)+"/"
			if os.path.isdir(ppath):
				self.protectmask.append(ppath)
			#if it doesn't exist, silently skip it

		for obj in mykeys:
			obj=os.path.normpath(obj)
			if not os.path.islink(obj):
				#we skip this if we're dealing with a symlink
				#because os.path.exists() will operate on the
				#link target rather than the link itself.
				if not os.path.exists(obj):
					print "--- !found", pkgfiles[obj][0], obj
					continue
			#an mtime change means someone modified the file after install;
			#leave it in place (dir/fif/dev entries record no mtime)
			if (pkgfiles[obj][0] not in ("dir","fif","dev")) and (getmtime(obj) != pkgfiles[obj][1]):
				print "--- !mtime", pkgfiles[obj][0], obj
				continue
			if pkgfiles[obj][0]=="dir":
				if not os.path.isdir(obj):
					print "--- !dir ","dir", obj
					continue
				#only remove directories that are empty by now
				if os.listdir(obj):
					print "--- !empty","dir", obj
					continue
				try:
					os.rmdir(obj)
				except OSError:
					#We couldn't remove the dir; maybe it's immutable?
					pass
				print "<<< ","dir",obj
			elif pkgfiles[obj][0]=="sym":
				if not os.path.islink(obj):
					print "--- !sym ","sym", obj
					continue
				if (getmtime(obj) != pkgfiles[obj][1]):
					print "--- !mtime sym",obj
					continue
				mydest=os.readlink(obj)
				if os.path.exists(os.path.normpath(self.myroot+mydest)):
					#only verify the destination when the target still exists
					if mydest != pkgfiles[obj][2]:
						print "--- !destn","sym", obj
						continue
				myppath=""
				for ppath in self.protect:
					if obj[0:len(ppath)]==ppath:
						masked=0
						#config file management
						for pmpath in self.protectmask:
							if obj[0:len(pmpath)]==pmpath:
								#skip, it's in the mask
								masked=1
								break
						if not masked:
							myppath=ppath
							break
				if myppath:
					print "--- cfgpro ","sym",obj
					continue
				try:
					os.unlink(obj)
				except OSError:
					#immutable?
					pass
				print "<<< ","sym",obj
			elif pkgfiles[obj][0]=="obj":
				if not os.path.isfile(obj):
					print "--- !obj ","obj", obj
					continue
				mymd5=perform_md5(obj)
				# string.lower is needed because db entries used to be in upper-case. The
				# string.lower allows for backwards compatibility.
				if mymd5 != string.lower(pkgfiles[obj][2]):
					print "--- !md5 ","obj", obj
					continue
				myppath=""
				for ppath in self.protect:
					if obj[0:len(ppath)]==ppath:
						masked=0
						#config file management
						for pmpath in self.protectmask:
							if obj[0:len(pmpath)]==pmpath:
								#skip, it's in the mask
								masked=1
								break
						if not masked:
							myppath=ppath
							break
				if myppath:
					print "--- cfgpro ","obj",obj
				else:
					try:
						os.unlink(obj)
					except OSError:
						pass
					print "<<< ","obj",obj
			elif pkgfiles[obj][0]=="fif":
				if not isfifo(obj):
					print "--- !fif ","fif", obj
					continue
				myppath=""
				for ppath in self.protect:
					if obj[0:len(ppath)]==ppath:
						masked=0
						#config file management
						for pmpath in self.protectmask:
							if obj[0:len(pmpath)]==pmpath:
								#skip, it's in the mask
								masked=1
								break
						if not masked:
							myppath=ppath
							break
				if myppath:
					print "--- cfgpro ","fif",obj
					continue
				try:
					os.unlink(obj)
				except OSError:
					pass
				print "<<< ","fif",obj
			elif pkgfiles[obj][0]=="dev":
				#device nodes are never removed automatically
				print "--- ","dev",obj

		#remove self from vartree database so that our own virtual gets zapped if we're the last node
		db[self.myroot]["vartree"].zap(self.cat+"/"+self.pkg)
		#remove stale virtual entries (mappings for packages that no longer exist)
		newvirts={}
		myvirts=grabdict(self.myroot+"var/cache/edb/virtuals")
		for myvirt in myvirts.keys():
			newvirts[myvirt]=[]
			for mykey in myvirts[myvirt]:
				if db[self.myroot]["vartree"].hasnode(mykey):
					newvirts[myvirt].append(mykey)
			if newvirts[myvirt]==[]:
				del newvirts[myvirt]
		writedict(newvirts,self.myroot+"var/cache/edb/virtuals")

		#do original postrm
		if myebuildpath:
			a=doebuild(myebuildpath,"postrm",self.myroot)
			if a:
				print "!!! pkg_postrm() script failed; exiting."
				sys.exit(a)
2879
	def treewalk(self,srcroot,destroot,inforoot,myebuild):
		# srcroot = ${D}; destroot=where to merge, ie. ${ROOT}, inforoot=root of db entry,
		# secondhand = list of symlinks that have been skipped due to their target not existing (will merge later),
		"""Merge the image at srcroot into the live filesystem at destroot.

		Assigns a COUNTER value (updating the flock-protected global counter
		file), runs pkg_preinst, merges all files via mergeme() -- deferring
		symlinks whose targets aren't merged yet into a "second hand" that is
		retried until it stops shrinking -- safely unmerges any previously
		installed instance, records the db entry and virtuals, then runs
		pkg_postinst and env_update().  sys.exit()s on ebuild phase failure.
		"""
		if not os.path.exists(self.dbdir):
			self.create()
		#print ">>> Updating mtimes..."
		# before merging, it's *very important* to touch all the files
		# this ensures that their mtime is current and unmerging works correctly
		#spawn("(cd "+srcroot+"; for x in `find`; do touch -c $x 2>/dev/null; done)",free=1)
		print ">>> Merging",self.cat+"/"+self.pkg,"to",destroot
		# get current counter value
		edbpath=destroot+"/var/cache/edb/"
		counterpath=edbpath+"counter"
		packagecounter=long(0)
		globalcounterfile=None
		if not os.path.exists(edbpath):
			os.makedirs(edbpath)
		#hold an exclusive flock on the counter file until the new value is
		#written back, so concurrent merges can't hand out the same counter
		if os.path.exists(counterpath):
			globalcounterfile=open(counterpath, "r+")
			fcntl.flock(globalcounterfile.fileno(), fcntl.LOCK_EX)
			packagecounter=long(globalcounterfile.readline())
		else:
			globalcounterfile=open(counterpath, "w")
			fcntl.flock(globalcounterfile.fileno(), fcntl.LOCK_EX)
		packagecounter=packagecounter+1
		# write package counter
		localcounterfile=open(inforoot+"/COUNTER","w")
		localcounterfile.write(str(packagecounter))
		localcounterfile.close()
		# update global counter
		globalcounterfile.seek(0,0)
		globalcounterfile.truncate(0);
		globalcounterfile.write(str(packagecounter))
		fcntl.flock(globalcounterfile.fileno(), fcntl.LOCK_UN)
		globalcounterfile.close()
		#This next line just ends up confusing people and I don't think it's absolutely necessary;
		#commented out (drobbins)
		#print ">>> Package will have counter",packagecounter
		# get old contents info for later unmerging
		oldcontents=self.getcontents()
		# run preinst script
		if myebuild:
			# if we are merging a new ebuild, use *its* pre/postinst rather than using the one in /var/db/pkg
			# (if any).
			a=doebuild(myebuild,"preinst",root)
		else:
			a=doebuild(inforoot+"/"+self.pkg+".ebuild","preinst",root)
		if a:
			print "!!! pkg_preinst() script failed; exiting."
			sys.exit(a)
		# open CONTENTS file (possibly overwriting old one) for recording
		outfile=open(inforoot+"/CONTENTS","w")
		# prep for config file management
		self.protect=[]
		# self.protect records any paths in CONFIG_PROTECT that are real directories and exist
		for x in string.split(settings["CONFIG_PROTECT"]):
			ppath=os.path.normpath(destroot+"/"+x)+"/"
			if os.path.isdir(ppath):
				self.protect.append(ppath)
		self.protectmask=[]
		# self.protectmask records any paths in CONFIG_PROTECT_MASK that are real directories and exist
		for x in string.split(settings["CONFIG_PROTECT_MASK"]):
			ppath=os.path.normpath(destroot+"/"+x)+"/"
			if os.path.isdir(ppath):
				self.protectmask.append(ppath)
		cfgfiledict={}
		#if we have a file containing previously-merged config file md5sums, grab it.
		if os.path.exists(destroot+"/var/cache/edb/config"):
			cfgfiledict=grabdict(destroot+"/var/cache/edb/config")
		# set umask to 0 for merging; back up umask, save old one in prevmask (since this is a global change)
		mymtime=int(time.time())
		prevmask=os.umask(0)
		secondhand=[]
		# we do a first merge; this will recurse through all files in our srcroot but also build up a
		# "second hand" of symlinks to merge later
		self.mergeme(srcroot,destroot,outfile,secondhand,"",cfgfiledict,mymtime)
		# now, it's time for dealing our second hand; we'll loop until we can't merge anymore. The rest are
		# broken symlinks. We'll merge them too.
		lastlen=0
		while len(secondhand) and len(secondhand)!=lastlen:
			# clear the thirdhand. Anything from our second hand that couldn't get merged will be
			# added to thirdhand.
			thirdhand=[]
			self.mergeme(srcroot,destroot,outfile,thirdhand,secondhand,cfgfiledict,mymtime)
			#swap hands
			lastlen=len(secondhand)
			# our thirdhand now becomes our secondhand. It's ok to throw away secondhand since
			# thirdhand contains all the stuff that couldn't be merged.
			secondhand=thirdhand
		if len(secondhand):
			# force merge of remaining symlinks (broken or circular; oh well)
			self.mergeme(srcroot,destroot,outfile,None,secondhand,cfgfiledict,mymtime)

		#restore umask
		os.umask(prevmask)
		#if we opened it, close it
		outfile.close()
		print
		if (oldcontents):
			print ">>> Safely unmerging already-installed instance..."
			self.unmerge(oldcontents)
			print ">>> original instance of package unmerged safely."
		# copy "info" files (like SLOT, CFLAGS, etc.) into the database
		for x in os.listdir(inforoot):
			self.copyfile(inforoot+"/"+x)

		#write out our collection of md5sums
		writedict(cfgfiledict,destroot+"/var/cache/edb/config")

		#create virtual links
		myprovides=self.getelements("PROVIDE")
		if myprovides:
			#register this cat/pkg under each virtual it PROVIDEs
			myvkey=self.cat+"/"+pkgsplit(self.pkg)[0]
			myvirts=grabdict(destroot+"var/cache/edb/virtuals")
			for mycatpkg in self.getelements("PROVIDE"):
				if isspecific(mycatpkg):
					#convert a specific virtual like dev-lang/python-2.2 to dev-lang/python
					mysplit=catpkgsplit(mycatpkg)
					mycatpkg=mysplit[0]+"/"+mysplit[1]
				if myvirts.has_key(mycatpkg):
					if myvkey not in myvirts[mycatpkg]:
						#prepend so newly-merged providers take precedence
						myvirts[mycatpkg][0:0]=[myvkey]
				else:
					myvirts[mycatpkg]=[myvkey]
			writedict(myvirts,destroot+"var/cache/edb/virtuals")

		#do postinst script
		if myebuild:
			# if we are merging a new ebuild, use *its* pre/postinst rather than using the one in /var/db/pkg
			# (if any).
			a=doebuild(myebuild,"postinst",root)
		else:
			a=doebuild(inforoot+"/"+self.pkg+".ebuild","postinst",root)
		if a:
			print "!!! pkg_postinst() script failed; exiting."
			sys.exit(a)
		#update environment settings, library paths
		env_update()
		print ">>>",self.cat+"/"+self.pkg,"merged."
3020
	def mergeme(self,srcroot,destroot,outfile,secondhand,stufftomerge,cfgfiledict,thismtime):
		"""Recursively merge objects from the install image into the live filesystem.

		srcroot      -- root of the temporary install image, with trailing slash
		destroot     -- root of the live filesystem ($ROOT), with trailing slash
		outfile      -- open CONTENTS file; one "sym"/"dir"/"obj"/"fif" record is
		                written per merged object
		secondhand   -- list collecting symlinks whose targets are not merged yet
		                (the caller retries them later); None disables deferral
		                ("force" mode)
		stufftomerge -- either a directory path relative to srcroot (string) to
		                scan and recurse into, OR an explicit list of relative paths
		cfgfiledict  -- {live-path: [md5, ...]} history backing CONFIG_PROTECT
		                config-file management (persisted by the caller)
		thismtime    -- mtime stamped onto each merged object
		"""
		# this is supposed to merge a list of files.  There will be 2 forms of argument passing.
		if type(stufftomerge)==types.StringType:
			#A directory is specified.  Figure out protection paths, listdir() it and process it.
			mergelist=os.listdir(srcroot+stufftomerge)
			offset=stufftomerge
			# We need mydest defined up here to calc. protection paths.  This is now done once per
			# directory rather than once per file merge.  This should really help merge performance.
			mytruncpath="/"+offset+"/"
			myppath=""
			for ppath in self.protect:
				#before matching against a protection path.
				if mytruncpath[0:len(ppath)]==ppath:
					myppath=ppath
					#config file management
					for pmpath in self.protectmask:
						#again, dir symlinks are expanded
						if mytruncpath[0:len(pmpath)]==pmpath:
							#skip, it's in the mask
							myppath=""
							break
					if not myppath:
						# the matching protect entry was masked out; stop looking
						break
			# collapse to a plain boolean: is this directory under CONFIG_PROTECT?
			myppath=(myppath!="")
		else:
			# explicit list of relative paths; no per-directory protection
			# calculation happens in this mode (myppath comes from the caller's
			# earlier string-mode invocation state -- NOTE(review): in list mode
			# myppath is whatever the previous recursion level left; confirm.
			mergelist=stufftomerge
			offset=""
		for x in mergelist:
			mysrc=srcroot+offset+x
			mydest=destroot+offset+x
			# myrealdest is mydest without the $ROOT prefix (makes a difference if ROOT!="/")
			myrealdest="/"+offset+x
			# stat file once, test using S_* macros many times (faster that way)
			mystat=os.lstat(mysrc)
			mymode=mystat[ST_MODE]
			# handy variables; mydest is the target object on the live filesystems;
			# mysrc is the source object in the temporary install dir
			try:
				mydmode=os.lstat(mydest)[ST_MODE]
			except:
				#dest file doesn't exist
				mydmode=None

			if S_ISLNK(mymode):
				# we are merging a symbolic link
				myto=os.readlink(mysrc)
				# myrealto contains the path of the real file to which this symlink points.
				# we can simply test for existence of this file to see if the target has been merged yet
				myrealto=os.path.normpath(os.path.join(destroot,myto))
				if mydmode!=None:
					#destination exists
					if (not S_ISLNK(mydmode)) and (S_ISDIR(mydmode)):
						# directory in the way: we can't merge a symlink over a directory
						print "!!!",mydest,"->",myto
						# we won't merge this, continue with next file...
						continue
				# if secondhand==None it means we're operating in "force" mode and should not create a second hand.
				if (secondhand!=None) and (not os.path.exists(myrealto)):
					# either the target directory doesn't exist yet or the target file doesn't exist -- or
					# the target is a broken symlink.  We will add this file to our "second hand" and merge
					# it later.
					secondhand.append(mysrc[len(srcroot):])
					continue
				# unlinking no longer necessary; "movefile" will overwrite symlinks atomically and correctly
				mymtime=movefile(mysrc,mydest,thismtime,mystat)
				if mymtime!=None:
					print ">>>",mydest,"->",myto
					outfile.write("sym "+myrealdest+" -> "+myto+" "+`mymtime`+"\n")
				else:
					# movefile() failed; flag it but keep merging the rest
					print "!!!",mydest,"->",myto
			elif S_ISDIR(mymode):
				# we are merging a directory
				if mydmode!=None:
					# destination exists
					if S_ISLNK(mydmode) or S_ISDIR(mydmode):
						# a symlink to an existing directory will work for us; keep it:
						print "---",mydest+"/"
					else:
						# a non-directory and non-symlink-to-directory.  Won't work for us.  Move out of the way.
						movefile(mydest,mydest+".backup")
						print "bak",mydest,mydest+".backup"
						#now create our directory
						os.mkdir(mydest)
						# replicate the image dir's mode and ownership on the new dir
						os.chmod(mydest,mystat[0])
						os.chown(mydest,mystat[4],mystat[5])
						print ">>>",mydest+"/"
				else:
					#destination doesn't exist
					os.mkdir(mydest)
					os.chmod(mydest,mystat[0])
					os.chown(mydest,mystat[4],mystat[5])
					print ">>>",mydest+"/"
				outfile.write("dir "+myrealdest+"\n")
				# recurse and merge this directory
				self.mergeme(srcroot,destroot,outfile,secondhand,offset+x+"/",cfgfiledict,thismtime)
			elif S_ISREG(mymode):
				# we are merging a regular file
				mymd5=perform_md5(mysrc)
				# calculate config file protection stuff
				mydestdir=os.path.dirname(mydest)
				moveme=1
				zing="!!!"
				if mydmode!=None:
					# destination file exists
					if S_ISDIR(mydmode):
						# install of destination is blocked by an existing directory with the same name
						moveme=0
						print "!!!",mydest
					elif S_ISREG(mydmode):
						# install of destination is blocked by an existing regular file; now, config file
						# management may come into play.
						# we only need to tweak mydest if cfg file management is in play.
						if myppath:
							# we have a protection path; enable config file management.
							destmd5=perform_md5(mydest)
							if cfgfiledict.has_key(myrealdest):
								#this file has been merged in the past, either as the original file or as a ._cfg extension of original.
								#we can skip the merging of this file.  But we need to do one thing first, called "cycling".  Let's say that
								#since the last merge on this file, the user has copied /etc/._cfg0000_foo to /etc/foo.  The ._cfg had
								#position 4 in our md5 list (in cfgfiledict).  Now that the file has been moved into place, we want to
								#*throw away* md5s 0-3.  Reasoning?  By doing this, we discard expired md5sums, and also allow a *new*
								#package to merge a "classic" version of the file (consider if the new version was buggy, so we reverted
								#to the original... without this important code, the new "original" would not get merged since it had
								#been merged before.
								if destmd5 in cfgfiledict[myrealdest]:
									cfgfiledict[myrealdest]=cfgfiledict[myrealdest][cfgfiledict[myrealdest].index(destmd5):]
							if mymd5==destmd5:
								#file already in place, so no need to merge this file.  However, we need to update the
								#target file's times:
								os.utime(mydest,(thismtime,thismtime))
								zing="---"
								moveme=0
							elif cfgfiledict.has_key(myrealdest) and (mymd5 in cfgfiledict[myrealdest]):
								#ok, now that we've cycled cfgfiledict (see big paragraph above), it's safe to simply not merge this file
								#if it has been merged by us in the past.  Thanks to the cycling, we can be do this with some assurance
								#that we are not being overly zealous in our desire to avoid merging files unnecessarily.
								zing="---"
								moveme=0
							else:
								#don't overwrite --
								# the files are not identical (from an md5 perspective); we cannot simply overwrite.
								pnum=-1
								# set pmatch to the literal filename only
								pmatch=os.path.basename(mydest)
								# config protection filename format:
								# ._cfg0000_foo
								# positioning (for reference):
								# 0123456789012
								mypfile=""
								for pfile in os.listdir(mydestdir):
									if pfile[0:5]!="._cfg":
										continue
									if pfile[10:]!=pmatch:
										continue
									try:
										# chars 5-8 are the 4-digit counter
										newpnum=string.atoi(pfile[5:9])
										if newpnum>pnum:
											pnum=newpnum
											mypfile=pfile
									except:
										# malformed counter; ignore this candidate
										continue
								pnum=pnum+1
								# mypfile is set to the name of the most recent cfg management file currently on disk.
								# if their md5sums match, we overwrite the mypfile rather than creating a new .cfg file.
								# this keeps on-disk cfg management clutter to a minimum.
								cleanup=0
								if mypfile:
									pmd5=perform_md5(mydestdir+"/"+mypfile)
									if mymd5==pmd5:
										mydest=(mydestdir+"/"+mypfile)
										cleanup=1
								if not cleanup:
									# md5sums didn't match, so we create a new filename for merging.
									# we now have pnum set to the official 4-digit config that should be used for the file
									# we need to install.  Set mydest to this new value.
									mydest=os.path.normpath(mydestdir+"/._cfg"+string.zfill(pnum,4)+"_"+pmatch)
								#add to our md5 list for future reference (will get written to /var/cache/edb/config)
								if not cfgfiledict.has_key(myrealdest):
									cfgfiledict[myrealdest]=[]
								if mymd5 not in cfgfiledict[myrealdest]:
									cfgfiledict[myrealdest].append(mymd5)
								#don't record more than 16 md5sums
								if len(cfgfiledict[myrealdest])>16:
									del cfgfiledict[myrealdest][0]
				# whether config protection or not, we merge the new file the same way.  Unless moveme=0 (blocking directory)
				if moveme:
					mymtime=movefile(mysrc,mydest,thismtime,mystat)
					if mymtime!=None:
						zing=">>>"
						outfile.write("obj "+myrealdest+" "+mymd5+" "+`mymtime`+"\n")
				print zing,mydest
			else:
				# we are merging a fifo or device node
				zing="!!!"
				if mydmode==None:
					#destination doesn't exist
					if movefile(mysrc,mydest,thismtime,mystat)!=None:
						zing=">>>"
						if S_ISFIFO(mymode):
							#we don't record device nodes in CONTENTS, although we do merge them.
							outfile.write("fif "+myrealdest+"\n")
				print zing+" "+mydest
3223
	def merge(self,mergeroot,inforoot,myroot,myebuild=None):
		"""Merge this package into the live filesystem by delegating to treewalk().

		NOTE the argument-order swap: merge() takes (mergeroot, inforoot, myroot)
		but treewalk() is invoked as (mergeroot, myroot, inforoot).
		"""
		self.treewalk(mergeroot,myroot,inforoot,myebuild)
3226
3227 def getstring(self,name):
3228 "returns contents of a file with whitespace converted to spaces"
3229 if not os.path.exists(self.dbdir+"/"+name):
3230 return ""
3231 myfile=open(self.dbdir+"/"+name,"r")
3232 mydata=string.split(myfile.read())
3233 myfile.close()
3234 return string.join(mydata," ")
3235
3236 def copyfile(self,fname):
3237 if not os.path.exists(self.dbdir):
3238 self.create()
3239 shutil.copyfile(fname,self.dbdir+"/"+os.path.basename(fname))
3240
3241 def getfile(self,fname):
3242 if not os.path.exists(self.dbdir+"/"+fname):
3243 return ""
3244 myfile=open(self.dbdir+"/"+fname,"r")
3245 mydata=myfile.read()
3246 myfile.close()
3247 return mydata
3248
3249 def setfile(self,fname,data):
3250 if not os.path.exists(self.dbdir):
3251 self.create()
3252 myfile=open(self.dbdir+"/"+fname,"w")
3253 myfile.write(data)
3254 myfile.close()
3255
3256 def getelements(self,ename):
3257 if not os.path.exists(self.dbdir+"/"+ename):
3258 return []
3259 myelement=open(self.dbdir+"/"+ename,"r")
3260 mylines=myelement.readlines()
3261 myreturn=[]
3262 for x in mylines:
3263 for y in string.split(x[:-1]):
3264 myreturn.append(y)
3265 myelement.close()
3266 return myreturn
3267
3268 def setelements(self,mylist,ename):
3269 if not os.path.exists(self.dbdir):
3270 self.create()
3271 myelement=open(self.dbdir+"/"+ename,"w")
3272 for x in mylist:
3273 myelement.write(x+"\n")
3274 myelement.close()
3275
3276 def isregular(self):
3277 "Is this a regular package (does it have a CATEGORY file? A dblink can be virtual *and* regular)"
3278 return os.path.exists(self.dbdir+"/CATEGORY")
3279
def cleanup_pkgmerge(mypkg,origdir):
	"""Remove the temporary unpack tree created by pkgmerge() for *mypkg* and
	restore the caller's working directory *origdir*.  Relies on the
	module-level "settings" object for PKG_TMPDIR."""
	shutil.rmtree(settings["PKG_TMPDIR"]+"/"+mypkg)
	os.chdir(origdir)
3283
def pkgmerge(mytbz2,myroot):
	"""will merge a .tbz2 file, returning a list of runtime dependencies that must be
	satisfied, or None if there was a merge error.  This code assumes the package
	exists."""
	# NOTE(review): despite the docstring, the success return value is a
	# space-joined *string* of RDEPEND atoms (or ""), not a list.
	if mytbz2[-5:]!=".tbz2":
		print "!!! Not a .tbz2 file"
		return None
	# strip the ".tbz2" extension to get the package name-version (${PF})
	mypkg=os.path.basename(mytbz2)[:-5]
	xptbz2=xpak.tbz2(mytbz2)
	pkginfo={}
	# metadata such as CATEGORY lives in the xpak info chunk of the tbz2
	mycat=xptbz2.getfile("CATEGORY")
	if not mycat:
		print "!!! CATEGORY info missing from info chunk, aborting..."
		return None
	mycat=mycat.strip()
	mycatpkg=mycat+"/"+mypkg

	# unpack layout: image goes to <tmp>/<pkg>/bin/, metadata to <tmp>/<pkg>/inf/
	tmploc=settings["PKG_TMPDIR"]
	pkgloc=tmploc+"/"+mypkg+"/bin/"
	infloc=tmploc+"/"+mypkg+"/inf/"
	if os.path.exists(tmploc+"/"+mypkg):
		# second arg 1 = ignore_errors: best-effort removal of a stale unpack dir
		shutil.rmtree(tmploc+"/"+mypkg,1)
	os.makedirs(pkgloc)
	os.makedirs(infloc)
	print ">>> extracting info"
	xptbz2.unpackinfo(infloc)
	# remember cwd so cleanup_pkgmerge() can restore it; tar unpacks into cwd
	origdir=getmycwd()
	os.chdir(pkgloc)
	print ">>> extracting",mypkg
	# free=1 presumably runs the pipeline unsandboxed -- TODO confirm against spawn()
	notok=spawn("cat "+mytbz2+"| bzip2 -dq | tar xpf -",free=1)
	if notok:
		print "!!! Error extracting",mytbz2
		cleanup_pkgmerge(mypkg,origdir)
		return None
	#the merge takes care of pre/postinst and old instance auto-unmerge, virtual/provides updates, etc.
	mylink=dblink(mycat,mypkg,myroot)
	if not mylink.exists():
		mylink.create()
	#shell error code
	mylink.merge(pkgloc,infloc,myroot)
	if not os.path.exists(infloc+"/RDEPEND"):
		returnme=""
	else:
		#get runtime dependencies
		a=open(infloc+"/RDEPEND","r")
		# normalize all whitespace in RDEPEND to single spaces
		returnme=string.join(string.split(a.read())," ")
		a.close()
	cleanup_pkgmerge(mypkg,origdir)
	return returnme
3333
3334 root=getenv("ROOT")
3335 if len(root)==0:
3336 root="/"
3337 elif root[-1]!="/":
3338 root=root+"/"
3339 if root != "/":
3340 if not os.path.exists(root[:-1]):
3341 print "!!! Error: ROOT",root,"does not exist. Please correct this."
3342 print "!!! Exiting."
3343 print
3344 sys.exit(1)
3345 elif not os.path.isdir(root[:-1]):
3346 print "!!! Error: ROOT",root[:-1],"is not a directory. Please correct this."
3347 print "!!! Exiting."
3348 print
3349 sys.exit(1)
3350
3351 #create tmp and var/tmp if they don't exist; read config
3352 os.umask(0)
3353 if not os.path.exists(root+"tmp"):
3354 print ">>> "+root+"tmp doesn't exist, creating it..."
3355 os.mkdir(root+"tmp",01777)
3356 if not os.path.exists(root+"var/tmp"):
3357 print ">>> "+root+"var/tmp doesn't exist, creating it..."
3358 os.mkdir(root+"var",0755)
3359 os.mkdir(root+"var/tmp",01777)
3360 os.umask(022)
3361 profiledir=None
3362 if os.path.exists("/etc/make.profile/make.defaults"):
3363 profiledir="/etc/make.profile"
3364 else:
3365 print ">>> Note: /etc/make.profile isn't available; an 'emerge sync' will probably fix this."
3366 #from here on in we can assume that profiledir is set to something valid
3367 db={}
3368 virts=getvirtuals("/")
3369 db["/"]={"virtuals":virts,"vartree":vartree("/",virts)}
3370 if root!="/":
3371 virts=getvirtuals(root)
3372 db[root]={"virtuals":virts,"vartree":vartree(root,virts)}
3373 #We need to create the vartree first, then load our settings, and then set up our other trees
3374 settings=config()
3375 #continue setting up other trees
3376 db["/"]["porttree"]=portagetree("/",virts)
3377 db["/"]["bintree"]=binarytree("/",virts)
3378 if root!="/":
3379 db[root]["porttree"]=portagetree(root,virts)
3380 db[root]["bintree"]=binarytree(root,virts)
3381 thirdpartymirrors=grabdict(settings["PORTDIR"]+"/profiles/thirdpartymirrors")
3382
3383 #,"porttree":portagetree(root,virts),"bintree":binarytree(root,virts)}
3384 features=settings["FEATURES"].split()
3385 #getting categories from an external file now
3386 if os.path.exists(settings["PORTDIR"]+"/profiles/categories"):
3387 categories=grabfile(settings["PORTDIR"]+"/profiles/categories")
3388 else:
3389 categories=[]

  ViewVC Help
Powered by ViewVC 1.1.20