From: <jw...@us...> - 2010-08-09 14:08:16
Revision: 2013
http://edk2-buildtools.svn.sourceforge.net/edk2-buildtools/?rev=2013&view=rev
Author: jwang36
Date: 2010-08-09 14:08:10 +0000 (Mon, 09 Aug 2010)
Log Message:
-----------
Updated tools code to support BSF file generation.
Modified Paths:
--------------
trunk/BaseTools/Source/Python/AutoGen/AutoGen.py
trunk/BaseTools/Source/Python/AutoGen/GenC.py
trunk/BaseTools/Source/Python/Common/Misc.py
trunk/BaseTools/Source/Python/Common/String.py
trunk/BaseTools/Source/Python/CommonDataClass/DataClass.py
trunk/BaseTools/Source/Python/Workspace/MetaFileParser.py
trunk/BaseTools/Source/Python/Workspace/WorkspaceDatabase.py
Modified: trunk/BaseTools/Source/Python/AutoGen/AutoGen.py
===================================================================
--- trunk/BaseTools/Source/Python/AutoGen/AutoGen.py 2010-08-09 10:04:28 UTC (rev 2012)
+++ trunk/BaseTools/Source/Python/AutoGen/AutoGen.py 2010-08-09 14:08:10 UTC (rev 2013)
@@ -770,10 +770,14 @@
## Get list of non-dynamic PCDs
def _GetNonDynamicPcdList(self):
+ if self._NonDynamicPcdList == None:
+ self.CollectPlatformDynamicPcds()
return self._NonDynamicPcdList
## Get list of dynamic PCDs
def _GetDynamicPcdList(self):
+ if self._DynamicPcdList == None:
+ self.CollectPlatformDynamicPcds()
return self._DynamicPcdList
## Generate Token Number for all PCD
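The two accessors above now populate the PCD lists on first use instead of assuming CollectPlatformDynamicPcds() has already run, presumably so the new BSF generation path can ask for them in any order. A minimal sketch of this lazy-initialization pattern, using an illustrative class name rather than the real PlatformAutoGen:

    class PcdListHolder:
        def __init__(self):
            self._DynamicPcdList = None
            self._NonDynamicPcdList = None

        def CollectPlatformDynamicPcds(self):
            # a single (expensive) classification pass fills both caches
            self._DynamicPcdList = []
            self._NonDynamicPcdList = []

        def _GetDynamicPcdList(self):
            if self._DynamicPcdList is None:    # collect only on first access
                self.CollectPlatformDynamicPcds()
            return self._DynamicPcdList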
Modified: trunk/BaseTools/Source/Python/AutoGen/GenC.py
===================================================================
--- trunk/BaseTools/Source/Python/AutoGen/GenC.py 2010-08-09 10:04:28 UTC (rev 2012)
+++ trunk/BaseTools/Source/Python/AutoGen/GenC.py 2010-08-09 14:08:10 UTC (rev 2013)
@@ -1262,10 +1262,11 @@
VariableHeadValueList = []
Pcd.InitString = 'UNINIT'
- if Pcd.Type in ["DynamicVpd", "DynamicExVpd"]:
- Pcd.TokenTypeList = ['PCD_TYPE_VPD']
- elif Pcd.DatumType == 'VOID*':
- Pcd.TokenTypeList = ['PCD_TYPE_STRING']
+ if Pcd.DatumType == 'VOID*':
+ if Pcd.Type not in ["DynamicVpd", "DynamicExVpd"]:
+ Pcd.TokenTypeList = ['PCD_TYPE_STRING']
+ else:
+ Pcd.TokenTypeList = []
elif Pcd.DatumType == 'BOOLEAN':
Pcd.TokenTypeList = ['PCD_DATUM_TYPE_UINT8']
else:
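Net effect of the rewritten branch: a VOID* PCD is tagged PCD_TYPE_STRING only when it is not VPD-backed, and a VPD-backed VOID* PCD now starts with an empty token-type list (PCD_TYPE_VPD is presumably appended elsewhere, outside the lines shown). A hedged restatement of the logic; classify_token_types is an illustrative helper, not part of GenC.py:

    def classify_token_types(datum_type, pcd_type):
        if datum_type == 'VOID*':
            if pcd_type not in ('DynamicVpd', 'DynamicExVpd'):
                return ['PCD_TYPE_STRING']
            return []                        # VPD-backed VOID* gets no token type in this branch
        if datum_type == 'BOOLEAN':
            return ['PCD_DATUM_TYPE_UINT8']
        return None                          # other datum types: handled by the else branch (not shown)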
Modified: trunk/BaseTools/Source/Python/Common/Misc.py
===================================================================
--- trunk/BaseTools/Source/Python/Common/Misc.py 2010-08-09 10:04:28 UTC (rev 2012)
+++ trunk/BaseTools/Source/Python/Common/Misc.py 2010-08-09 14:08:10 UTC (rev 2013)
@@ -719,7 +719,7 @@
while Template:
MatchObj = gPlaceholderPattern.search(Template, SearchFrom)
if not MatchObj:
- if MatchEnd < len(Template):
+ if MatchEnd <= len(Template):
TemplateSection = TemplateString.Section(Template[SectionStart:], PlaceHolderList)
TemplateSectionList.append(TemplateSection)
break
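The relaxed comparison covers the boundary case where the previous placeholder match ends exactly at the end of the template, so the final section is no longer dropped. A hedged illustration, assuming gPlaceholderPattern matches ${Name}-style placeholders:

    import re
    gPlaceholderPattern = re.compile(r'\$\{([^$()\s]*)\}')   # assumed placeholder syntax
    Template = 'Copyright ${Year}'
    MatchObj = gPlaceholderPattern.search(Template)
    print(MatchObj.end() == len(Template))   # True: MatchEnd == len(Template),
                                             # the case 'MatchEnd <= len(Template)' now accepts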
Modified: trunk/BaseTools/Source/Python/Common/String.py
===================================================================
--- trunk/BaseTools/Source/Python/Common/String.py 2010-08-09 10:04:28 UTC (rev 2012)
+++ trunk/BaseTools/Source/Python/Common/String.py 2010-08-09 14:08:10 UTC (rev 2013)
@@ -296,6 +296,50 @@
return Line
+## CleanString2
+#
+# Split comments in a string
+# Remove spaces
+#
+# @param Line: The string to be cleaned
+# @param CommentCharacter: Comment char, used to ignore comment content, default is DataType.TAB_COMMENT_SPLIT
+#
+# @retval Line: The statement portion of the line, with comment and surrounding spaces removed
+# @retval Comment: The comment content, stripped of comment characters and whitespace
+#
+def CleanString2(Line, CommentCharacter = DataType.TAB_COMMENT_SPLIT, AllowCppStyleComment=False):
+ #
+ # remove whitespace
+ #
+ Line = Line.strip();
+ #
+ # Replace R8's comment character
+ #
+ if AllowCppStyleComment:
+ Line = Line.replace(DataType.TAB_COMMENT_R8_SPLIT, CommentCharacter)
+ #
+ # separate comments and statements
+ #
+ LineParts = Line.split(CommentCharacter, 1);
+ #
+ # remove whitespace again
+ #
+ Line = LineParts[0].strip();
+ if len(LineParts) > 1:
+ Comment = LineParts[1].strip()
+ # Remove prefixed and trailing comment characters
+ Start = 0
+ End = len(Comment)
+ while Start < End and Comment.startswith(CommentCharacter, Start, End):
+ Start += 1
+ while End >= 0 and Comment.endswith(CommentCharacter, Start, End):
+ End -= 1
+ Comment = Comment[Start:End]
+ Comment = Comment.strip()
+ else:
+ Comment = ''
+
+ return Line, Comment
+
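A quick usage illustration of the new helper, assuming DataType.TAB_COMMENT_SPLIT is '#':

    Line, Comment = CleanString2('  gMyToken = 1   ## default token value  ')
    # Line    -> 'gMyToken = 1'
    # Comment -> 'default token value'   (leading '#' characters and whitespace stripped)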
## GetMultipleValuesOfKeyFromLines
#
# Parse multiple strings to clean comment and spaces
Modified: trunk/BaseTools/Source/Python/CommonDataClass/DataClass.py
===================================================================
--- trunk/BaseTools/Source/Python/CommonDataClass/DataClass.py 2010-08-09 10:04:28 UTC (rev 2012)
+++ trunk/BaseTools/Source/Python/CommonDataClass/DataClass.py 2010-08-09 14:08:10 UTC (rev 2013)
@@ -91,6 +91,7 @@
MODEL_META_DATA_CONDITIONAL_STATEMENT_ELSEIF = 50013
MODEL_META_DATA_CONDITIONAL_STATEMENT_ENDIF = 5014
MODEL_META_DATA_COMPONENT_SOURCE_OVERRIDE_PATH = 5015
+MODEL_META_DATA_COMMENT = 5016
MODEL_EXTERNAL_DEPENDENCY = 10000
@@ -159,7 +160,8 @@
("MODEL_META_DATA_COMPONENT", MODEL_META_DATA_COMPONENT),
('MODEL_META_DATA_USER_EXTENSION', MODEL_META_DATA_USER_EXTENSION),
('MODEL_META_DATA_PACKAGE', MODEL_META_DATA_PACKAGE),
- ('MODEL_META_DATA_NMAKE', MODEL_META_DATA_NMAKE)
+ ('MODEL_META_DATA_NMAKE', MODEL_META_DATA_NMAKE),
+ ('MODEL_META_DATA_COMMENT', MODEL_META_DATA_COMMENT)
]
## FunctionClass
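The new MODEL_META_DATA_COMMENT identifier lets parsed comments be stored as ordinary meta-data records. A small illustrative lookup, assuming the name/value pairs above form the module's MODEL_LIST table:

    ModelIdToName = dict((Value, Name) for Name, Value in MODEL_LIST)
    print(ModelIdToName[MODEL_META_DATA_COMMENT])    # 'MODEL_META_DATA_COMMENT'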
Modified: trunk/BaseTools/Source/Python/Workspace/MetaFileParser.py
===================================================================
--- trunk/BaseTools/Source/Python/Workspace/MetaFileParser.py 2010-08-09 10:04:28 UTC (rev 2012)
+++ trunk/BaseTools/Source/Python/Workspace/MetaFileParser.py 2010-08-09 14:08:10 UTC (rev 2013)
@@ -564,6 +564,7 @@
# sections which allow "!include" directive
_IncludeAllowedSection = [
+ TAB_COMMON_DEFINES.upper(),
TAB_LIBRARIES.upper(),
TAB_LIBRARY_CLASSES.upper(),
TAB_SKUIDS.upper(),
@@ -781,6 +782,7 @@
self._SectionType = Parser._SectionType
self._Scope = Parser._Scope
self._Enabled = Parser._Enabled
+ self._Macros.update(Parser._Macros)
else:
if DirectiveName in ["!IF", "!IFDEF", "!IFNDEF"]:
# evaluate the expression
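Two related tweaks here: !include is now permitted under TAB_COMMON_DEFINES (the [Defines] section), and macros defined inside an included file are merged back into the including parser instead of being discarded. A minimal sketch of that merge, with illustrative dictionaries rather than the real parser state:

    IncludingMacros = {'ARCH': 'X64'}
    IncludedMacros  = {'PLATFORM_GUID': 'SOME-ILLUSTRATIVE-GUID'}   # DEFINE'd in the !include'd file
    IncludingMacros.update(IncludedMacros)    # after this change the macro survives the include
    # -> {'ARCH': 'X64', 'PLATFORM_GUID': 'SOME-ILLUSTRATIVE-GUID'}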
@@ -965,6 +967,7 @@
#
def __init__(self, FilePath, FileType, Table, Macro=None):
MetaFileParser.__init__(self, FilePath, FileType, Table, Macro, -1)
+ self._Comments = []
## Parser starter
def Start(self):
@@ -975,27 +978,34 @@
EdkLogger.error("Parser", FILE_READ_FAILURE, ExtraData=self.MetaFile)
for Index in range(0, len(self._Content)):
- Line = CleanString(self._Content[Index])
+ Line, Comment = CleanString2(self._Content[Index])
+ self._CurrentLine = Line
+ self._LineIndex = Index
+
+ # save comment for later use
+ if Comment:
+ self._Comments.append((Comment, self._LineIndex+1))
# skip empty line
if Line == '':
continue
- self._CurrentLine = Line
- self._LineIndex = Index
# section header
if Line[0] == TAB_SECTION_START and Line[-1] == TAB_SECTION_END:
self._SectionHeaderParser()
+ self._Comments = []
continue
elif Line.startswith('DEFINE '):
self._MacroParser()
continue
elif len(self._SectionType) == 0:
+ self._Comments = []
continue
# section content
self._ValueList = ['','','']
self._SectionParser[self._SectionType[0]](self)
if self._ValueList == None:
+ self._Comments = []
continue
#
@@ -1017,6 +1027,22 @@
-1,
0
)
+ for Comment, LineNo in self._Comments:
+ self._Store(
+ MODEL_META_DATA_COMMENT,
+ Comment,
+ self._ValueList[0],
+ self._ValueList[1],
+ Arch,
+ ModuleType,
+ self._LastItem,
+ LineNo,
+ -1,
+ LineNo,
+ -1,
+ 0
+ )
+ self._Comments = []
self._Done()
## Section header parser
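Taken together, the parser now splits each line into statement and comment via CleanString2(), buffers comments with their line numbers, and either stores them as MODEL_META_DATA_COMMENT records attached to the item just parsed or drops them when a section header, an empty section, or a rejected value resets the context. A condensed sketch of that flow (an illustrative skeleton with hypothetical _ParseLine/_StoreComment helpers, not the real MetaFileParser):

    class CommentCollectingParser:
        def __init__(self):
            self._Comments = []

        def Start(self, Content):
            for Index, Raw in enumerate(Content):
                Line, Comment = CleanString2(Raw)
                if Comment:
                    self._Comments.append((Comment, Index + 1))   # keep the 1-based line number
                if Line == '':
                    continue
                if not self._ParseLine(Line):     # e.g. a section header or a rejected value
                    self._Comments = []           # these comments no longer belong to anything
                    continue
                for Text, LineNo in self._Comments:
                    self._StoreComment(Text, LineNo)   # stored with MODEL_META_DATA_COMMENT
                self._Comments = []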
Modified: trunk/BaseTools/Source/Python/Workspace/WorkspaceDatabase.py
===================================================================
--- trunk/BaseTools/Source/Python/Workspace/WorkspaceDatabase.py 2010-08-09 10:04:28 UTC (rev 2012)
+++ trunk/BaseTools/Source/Python/Workspace/WorkspaceDatabase.py 2010-08-09 14:08:10 UTC (rev 2013)
@@ -280,6 +280,8 @@
def _SetSkuName(self, Value):
if Value in self.SkuIds:
self._SkuName = Value
+ # Needs to re-retrieve the PCD information
+ self._Pcds = None
def _GetFdfFile(self):
if self._FlashDefinition == None:
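Clearing _Pcds in the setter invalidates the cached PCD table, so the next access re-collects it for the newly selected SKU. Illustrative use, assuming SkuName and Pcds are exposed as properties built on these accessors:

    Platform.SkuName = 'DEFAULT'     # the setter also resets the cached _Pcds to None
    PcdsForNewSku = Platform.Pcds    # re-retrieved with the new SKU in effect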
@@ -712,32 +714,22 @@
PcdDict[Arch, SkuName, PcdCName, TokenSpaceGuid] = Setting
# Remove redundant PCD candidates, per the ARCH and SKU
for PcdCName, TokenSpaceGuid in PcdSet:
- ValueList = ['', '']
+ ValueList = ['', '', '']
Setting = PcdDict[self._Arch, self.SkuName, PcdCName, TokenSpaceGuid]
if Setting == None:
continue
TokenList = Setting.split(TAB_VALUE_SPLIT)
- # The TokenList have optional data, process flow will base on it's length
- if len(TokenList) == 1:
- VpdOffset = TokenList[0]
- MaxDatumSize, PcdValue = None, ''
- elif len(TokenList) == 2:
- VpdOffset, MaxDatumSize = TokenList[0:len(TokenList)]
- PcdValue = ''
- elif len(TokenList) == 3:
- VpdOffset, MaxDatumSize, PcdValue = TokenList[0:len(TokenList)]
- # Error format of vpd definition
- else:
- EdkLogger.error("build", FORMAT_INVALID, "Error format of VPD pcd definition.", File=self.MetaFile)
-
- SkuInfo = SkuInfoClass(self.SkuName, self.SkuIds[self.SkuName], '', '', '', '', VpdOffset)
+ ValueList[0:len(TokenList)] = TokenList
+ VpdOffset, MaxDatumSize, InitialValue = ValueList
+
+ SkuInfo = SkuInfoClass(self.SkuName, self.SkuIds[self.SkuName], '', '', '', '', VpdOffset, InitialValue)
Pcds[PcdCName, TokenSpaceGuid] = PcdClassObject(
PcdCName,
TokenSpaceGuid,
self._PCD_TYPE_STRING_[Type],
'',
- PcdValue,
'',
+ '',
MaxDatumSize,
{self.SkuName : SkuInfo},
False,
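The length-based if/elif ladder for VPD settings is replaced by padding a fixed-size list, so any missing optional field simply stays '', and the parsed initial value is now passed on to SkuInfoClass. Illustrative behaviour, assuming TAB_VALUE_SPLIT is '|':

    ValueList = ['', '', '']
    TokenList = '0x100|16'.split('|')          # VpdOffset and MaxDatumSize given, no initial value
    ValueList[0:len(TokenList)] = TokenList
    VpdOffset, MaxDatumSize, InitialValue = ValueList
    # -> ('0x100', '16', '')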