Skip to content

transformer

xsdata.codegen.transformer

SupportedType

Bases: NamedTuple

A supported resource model representation.

Attributes:

Name Type Description
id int

The integer identifier

name str

The name of the resource type

match_uri Callable

A callable to match against URI strings

match_content Callable

A callable to match against the raw file content

Source code in xsdata/codegen/transformer.py
47
48
49
50
51
52
53
54
55
56
57
58
59
60
class SupportedType(NamedTuple):
    """A supported resource model representation.

    Attributes:
        id: The integer identifier
        name: The name of the resource type
        match_uri: A callable to match against URI strings
        match_content: A callable to match against the raw file content
    """

    # Integer identifier returned by resource classification.
    id: int
    # Human readable resource type name.
    name: str
    # Predicate applied to a URI string; a truthy result selects this type.
    match_uri: Callable
    # Predicate applied to the decoded file contents; a truthy result selects this type.
    match_content: Callable

ResourceTransformer

Orchestrate the code generation from a list of sources.

Supports xsd, wsdl, dtd, xml and json documents.

Parameters:

Name Type Description Default
config GeneratorConfig

Generator configuration

required

Attributes:

Name Type Description
classes List[Class]

A list of class instances

processed List[str]

A list of processed uris

preloaded Dict

A uri/content map used as cache

Source code in xsdata/codegen/transformer.py
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
class ResourceTransformer:
    """Orchestrate the code generation from a list of sources.

    Supports xsd, wsdl, dtd, xml and json documents.

    Args:
        config: Generator configuration

    Attributes:
        classes: A list of class instances
        processed: A list of processed uris
        preloaded: A uri/content map used as cache
    """

    __slots__ = ("config", "classes", "processed", "preloaded")

    def __init__(self, config: GeneratorConfig):
        self.config = config
        self.classes: List[Class] = []
        self.processed: List[str] = []
        self.preloaded: Dict = {}

    def process(self, uris: List[str], cache: bool = False):
        """Process a list of resolved URI strings.

        Args:
            uris: A list of absolute URI strings to process
            cache: Specifies whether to cache the initial parsed classes
        """
        cache_file = self.get_cache_file(uris) if cache else None
        if cache_file and cache_file.exists():
            logger.info(f"Loading from cache {cache_file}")

            # NOTE(review): the cache is a locally written pickle; loading it
            # assumes the file is trusted. Never point this at external data.
            self.classes = pickle.loads(cache_file.read_bytes())
        else:
            self.process_sources(uris)

        # Persist the freshly mapped classes only when caching was requested
        # and no cache file existed before this run.
        if cache_file and not cache_file.exists():
            cache_file.write_bytes(pickle.dumps(self.classes))

        try:
            self.process_classes()
        except ModuleNotFoundError:
            logger.warning("Module not found on imports validation, please report it.")
        except (CircularDependencyError, ImportError):
            raise CodegenError(
                "Circular Dependencies Found",
                help="Try a different structure style and/or enable unnest classes. "
                "Cleanup previously generated packages and modules.",
            )

        # Stopwatch samples are collected in nanoseconds; report seconds.
        for name, times in stopwatches.items():
            logger.debug(f"{name} - {sum(times) / 1e9}s")

    def process_sources(self, uris: List[str]):
        """Process a list of resolved URI strings.

        Load the source URI strings and map them to codegen
        classes for further processing.

        Args:
            uris: A list of absolute URI strings to process
        """
        # Bucket the uris per detected resource type, then hand each bucket
        # to its dedicated processor (wsdl first, then xsd, dtd, xml, json).
        sources = defaultdict(list)
        for uri in uris:
            tp = self.classify_resource(uri)
            sources[tp].append(uri)

        self.process_definitions(sources[TYPE_DEFINITION])
        self.process_schemas(sources[TYPE_SCHEMA])
        self.process_dtds(sources[TYPE_DTD])
        self.process_xml_documents(sources[TYPE_XML])
        self.process_json_documents(sources[TYPE_JSON])

    def process_definitions(self, uris: List[str]):
        """Process a list of wsdl resources.

        Args:
            uris: A list of wsdl URI strings to process
        """
        # All wsdl documents are merged into a single definitions instance
        # before conversion.
        definitions = None
        for uri in uris:
            services = self.parse_definitions(uri, namespace=None)
            if definitions is None:
                definitions = services
            elif services:
                definitions.merge(services)

        if definitions is not None:
            collections.apply(definitions.schemas, self.convert_schema)
            self.convert_definitions(definitions)

    def process_schemas(self, uris: List[str]):
        """Process a list of xsd resources.

        Args:
            uris: A list of xsd URI strings to process
        """
        for uri in uris:
            self.process_schema(uri)

    def process_dtds(self, uris: List[str]):
        """Process a list of dtd resources.

        Args:
            uris: A list of dtd URI strings to process
        """
        classes: List[Class] = []

        for uri in uris:
            input_stream = self.load_resource(uri)
            if input_stream:
                logger.info("Parsing dtd %s", uri)
                dtd = DtdParser.parse(input_stream, location=uri)

                classes.extend(DtdMapper.map(dtd))

        self.classes.extend(classes)

    def process_schema(self, uri: str, namespace: Optional[str] = None):
        """Parse and convert schema to codegen models.

        Args:
            uri: The schema URI location
            namespace: The target namespace, if the URI is
                from an inline import
        """
        schema = self.parse_schema(uri, namespace)
        if schema:
            self.convert_schema(schema)

    def process_xml_documents(self, uris: List[str]):
        """Process a list of xml resources.

        Args:
            uris: A list of xml URI strings to process
        """
        classes = []
        parser = TreeParser()
        # All documents are assumed to share the directory of the first uri.
        location = os.path.dirname(uris[0]) if uris else ""
        for uri in uris:
            input_stream = self.load_resource(uri)
            if input_stream:
                logger.info("Parsing document %s", uri)
                any_element: AnyElement = parser.from_bytes(input_stream)
                classes.extend(ElementMapper.map(any_element, location))

        self.classes.extend(ClassUtils.reduce_classes(classes))

    def process_json_documents(self, uris: List[str]):
        """Process a list of json resources.

        Args:
            uris: A list of json URI strings to process
        """
        classes = []
        # The last segment of the output package names the generated classes.
        name = self.config.output.package.split(".")[-1]
        dirname = os.path.dirname(uris[0]) if uris else ""

        for uri in uris:
            input_stream = self.load_resource(uri)
            if input_stream:
                try:
                    data = json.load(io.BytesIO(input_stream))
                    logger.info("Parsing document %s", uri)
                    # Normalize a single object to a one-element list.
                    if isinstance(data, dict):
                        data = [data]

                    for obj in data:
                        classes.extend(DictMapper.map(obj, name, dirname))
                except ValueError as exc:
                    logger.warning("JSON load failed for file: %s", uri, exc_info=exc)

        self.classes.extend(ClassUtils.reduce_classes(classes))

    def process_classes(self):
        """Process the generated classes and write or print the output."""
        class_num, inner_num = self.count_classes(self.classes)
        if class_num:
            logger.info(
                "Analyzer input: %d main and %d inner classes", class_num, inner_num
            )

            classes = self.analyze_classes(self.classes)
            class_num, inner_num = self.count_classes(classes)
            logger.info(
                "Analyzer output: %d main and %d inner classes", class_num, inner_num
            )

            writer = CodeWriter.from_config(self.config)
            with stopwatch(CodeWriter.__name__):
                writer.write(classes)

    def convert_schema(self, schema: Schema):
        """Convert a schema instance to codegen classes.

        Process recursively any schema imports.

        Args:
            schema: The xsd schema instance
        """
        for sub in schema.included():
            if sub.location:
                self.process_schema(sub.location, schema.target_namespace)

        self.classes.extend(self.generate_classes(schema))

    def convert_definitions(self, definitions: Definitions):
        """Convert a definitions instance to codegen classes."""
        self.classes.extend(DefinitionsMapper.map(definitions))

    def generate_classes(self, schema: Schema) -> List[Class]:
        """Convert the given schema instance to a list of classes."""
        uri = schema.location
        logger.info("Compiling schema %s", uri or "...")
        classes = SchemaMapper.map(schema)

        class_num, inner_num = self.count_classes(classes)
        if class_num > 0:
            logger.info("Builder: %d main and %d inner classes", class_num, inner_num)

        return classes

    def parse_schema(self, uri: str, namespace: Optional[str]) -> Optional[Schema]:
        """Parse the given URI and return the schema instance.

        Args:
            uri: The resource URI
            namespace: The target namespace

        Returns:
            The parsed schema, or None if the resource could not be read.
        """
        input_stream = self.load_resource(uri)
        if input_stream is None:
            return None

        logger.info("Parsing schema %s", uri)
        parser = SchemaParser(target_namespace=namespace, location=uri)
        return parser.from_bytes(input_stream, Schema)

    def parse_definitions(
        self,
        uri: str,
        namespace: Optional[str],
    ) -> Optional[Definitions]:
        """Parse recursively the given URI and return the definitions instance.

        Args:
            uri: The resource URI
            namespace: The target namespace
        """
        input_stream = self.load_resource(uri)
        if input_stream is None:
            return None

        parser = DefinitionsParser(target_namespace=namespace, location=uri)
        definitions = parser.from_bytes(input_stream, Definitions)
        namespace = definitions.target_namespace

        # Follow wsdl imports recursively; anything else is treated as a schema.
        for imp in definitions.imports:
            if not imp.location:
                continue

            if imp.location.endswith("wsdl"):
                sub_definition = self.parse_definitions(imp.location, namespace)
                if sub_definition:
                    definitions.merge(sub_definition)
            else:
                self.process_schema(imp.location)

        return definitions

    def load_resource(self, uri: str) -> Optional[bytes]:
        """Read and return the contents of the given URI.

        Args:
            uri: The resource URI

        Returns:
            The raw bytes content or None if the resource could not be read
        """
        if uri not in self.processed:
            try:
                self.processed.append(uri)
                # Falsy preloaded content (None or empty bytes) falls through
                # to a fresh read of the uri.
                return self.preloaded.pop(uri, None) or opener.open(uri).read()  # nosec
            except OSError:
                logger.warning("Resource not found %s", uri)
        else:
            logger.debug("Skipping already processed: %s", uri)

        return None

    def classify_resource(self, uri: str) -> int:
        """Detect the resource type by the URI extension or the contents.

        Args:
            uri: The resource URI

        Returns:
            The resource integer identifier.
        """
        for supported_type in supported_types:
            if supported_type.match_uri(uri):
                return supported_type.id

        src = self.load_resource(uri)
        if src is not None:
            # Keep the content around so the upcoming real load doesn't hit
            # the uri twice, and forget the processed mark set by load_resource.
            self.preloaded[uri] = src
            self.processed.clear()
            text = src.decode("utf-8").strip()

            for supported_type in supported_types:
                if supported_type.match_content(text):
                    return supported_type.id

        return TYPE_UNKNOWN

    def analyze_classes(self, classes: List[Class]) -> List[Class]:
        """Analyze the given class list and return the final list of classes."""
        container = ClassContainer(config=self.config)
        container.extend(classes)
        container.process()
        return list(container)

    def count_classes(self, classes: List[Class]) -> Tuple[int, int]:
        """Return a tuple of counters for the main and inner classes.

        Args:
            classes: A list of class instances

        Returns:
            A tuple of root, inner counters, e.g. (100, 5)
        """
        main = len(classes)
        inner = 0
        for cls in classes:
            # sum() over the (main, inner) tuple counts all nested descendants.
            inner += sum(self.count_classes(cls.inner))

        return main, inner

    @classmethod
    def get_cache_file(cls, uris: List[str]) -> Path:
        """Return the cache path for the raw mapped classes.

        Args:
            uris: A list of URI strings

        Returns:
            A temporary file path instance
        """
        # md5 is used as a cache key only, not for security.
        key = hashlib.md5("".join(uris).encode()).hexdigest()
        tempdir = tempfile.gettempdir()
        return Path(tempdir).joinpath(f"xsdata.{__version__}.{key}.cache")

process(uris, cache=False)

Process a list of resolved URI strings.

Parameters:

Name Type Description Default
uris List[str]

A list of absolute URI strings to process

required
cache bool

Specifies whether to cache the initial parsed classes

False
Source code in xsdata/codegen/transformer.py
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
def process(self, uris: List[str], cache: bool = False):
    """Process a list of resolved URI strings.

    Args:
        uris: A list of absolute URI strings to process
        cache: Specifies whether to cache the initial parsed classes
    """
    cache_file = self.get_cache_file(uris) if cache else None
    if cache_file and cache_file.exists():
        logger.info(f"Loading from cache {cache_file}")

        # NOTE(review): the cache is a locally written pickle; loading it
        # assumes the file is trusted. Never point this at external data.
        self.classes = pickle.loads(cache_file.read_bytes())
    else:
        self.process_sources(uris)

    # Persist the freshly mapped classes only when caching was requested
    # and no cache file existed before this run.
    if cache_file and not cache_file.exists():
        cache_file.write_bytes(pickle.dumps(self.classes))

    try:
        self.process_classes()
    except ModuleNotFoundError:
        logger.warning("Module not found on imports validation, please report it.")
    except (CircularDependencyError, ImportError):
        raise CodegenError(
            "Circular Dependencies Found",
            help="Try a different structure style and/or enable unnest classes. "
            "Cleanup previously generated packages and modules.",
        )

    # Stopwatch samples are collected in nanoseconds; report seconds.
    for name, times in stopwatches.items():
        logger.debug(f"{name} - {sum(times) / 1e9}s")

process_sources(uris)

Process a list of resolved URI strings.

Load the source URI strings and map them to codegen classes for further processing.

Parameters:

Name Type Description Default
uris List[str]

A list of absolute URI strings to process

required
Source code in xsdata/codegen/transformer.py
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
def process_sources(self, uris: List[str]):
    """Process a list of resolved URI strings.

    Classify every source URI, bucket the uris per resource type and
    hand each bucket to its dedicated processor.

    Args:
        uris: A list of absolute URI strings to process
    """
    buckets = defaultdict(list)
    for uri in uris:
        buckets[self.classify_resource(uri)].append(uri)

    self.process_definitions(buckets[TYPE_DEFINITION])
    self.process_schemas(buckets[TYPE_SCHEMA])
    self.process_dtds(buckets[TYPE_DTD])
    self.process_xml_documents(buckets[TYPE_XML])
    self.process_json_documents(buckets[TYPE_JSON])

process_definitions(uris)

Process a list of wsdl resources.

Parameters:

Name Type Description Default
uris List[str]

A list of wsdl URI strings to process

required
Source code in xsdata/codegen/transformer.py
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
def process_definitions(self, uris: List[str]):
    """Process a list of wsdl resources.

    All parsed wsdl documents are merged into one definitions
    instance before conversion.

    Args:
        uris: A list of wsdl URI strings to process
    """
    merged = None
    for uri in uris:
        parsed = self.parse_definitions(uri, namespace=None)
        if merged is None:
            merged = parsed
        elif parsed:
            merged.merge(parsed)

    if merged is not None:
        collections.apply(merged.schemas, self.convert_schema)
        self.convert_definitions(merged)

process_schemas(uris)

Process a list of xsd resources.

Parameters:

Name Type Description Default
uris List[str]

A list of xsd URI strings to process

required
Source code in xsdata/codegen/transformer.py
189
190
191
192
193
194
195
196
def process_schemas(self, uris: List[str]):
    """Process a list of xsd resources.

    Args:
        uris: A list of xsd URI strings to process
    """
    for location in uris:
        self.process_schema(location)

process_dtds(uris)

Process a list of dtd resources.

Parameters:

Name Type Description Default
uris List[str]

A list of dtd URI strings to process

required
Source code in xsdata/codegen/transformer.py
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
def process_dtds(self, uris: List[str]):
    """Process a list of dtd resources.

    Args:
        uris: A list of dtd URI strings to process
    """
    collected: List[Class] = []
    for uri in uris:
        content = self.load_resource(uri)
        if not content:
            continue

        logger.info("Parsing dtd %s", uri)
        collected.extend(DtdMapper.map(DtdParser.parse(content, location=uri)))

    self.classes.extend(collected)

process_schema(uri, namespace=None)

Parse and convert schema to codegen models.

Parameters:

Name Type Description Default
uri str

The schema URI location

required
namespace Optional[str]

The target namespace, if the URI is from an inline import

None
Source code in xsdata/codegen/transformer.py
216
217
218
219
220
221
222
223
224
225
226
def process_schema(self, uri: str, namespace: Optional[str] = None):
    """Parse and convert schema to codegen models.

    Args:
        uri: The schema URI location
        namespace: The target namespace, if the URI is
            from an inline import
    """
    parsed = self.parse_schema(uri, namespace)
    if parsed:
        self.convert_schema(parsed)

process_xml_documents(uris)

Process a list of xml resources.

Parameters:

Name Type Description Default
uris List[str]

A list of xml URI strings to process

required
Source code in xsdata/codegen/transformer.py
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
def process_xml_documents(self, uris: List[str]):
    """Process a list of xml resources.

    Args:
        uris: A list of xml URI strings to process
    """
    collected = []
    tree_parser = TreeParser()
    # All documents are assumed to live in the directory of the first uri.
    base_location = os.path.dirname(uris[0]) if uris else ""
    for uri in uris:
        content = self.load_resource(uri)
        if not content:
            continue

        logger.info("Parsing document %s", uri)
        root: AnyElement = tree_parser.from_bytes(content)
        collected.extend(ElementMapper.map(root, base_location))

    self.classes.extend(ClassUtils.reduce_classes(collected))

process_json_documents(uris)

Process a list of json resources.

Parameters:

Name Type Description Default
uris List[str]

A list of json URI strings to process

required
Source code in xsdata/codegen/transformer.py
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
def process_json_documents(self, uris: List[str]):
    """Process a list of json resources.

    Args:
        uris: A list of json URI strings to process
    """
    collected = []
    # The last segment of the output package names the generated classes.
    package_name = self.config.output.package.split(".")[-1]
    base_dir = os.path.dirname(uris[0]) if uris else ""

    for uri in uris:
        content = self.load_resource(uri)
        if not content:
            continue

        try:
            payload = json.load(io.BytesIO(content))
            logger.info("Parsing document %s", uri)
            # Normalize a single object to a one-element list.
            records = [payload] if isinstance(payload, dict) else payload
            for record in records:
                collected.extend(DictMapper.map(record, package_name, base_dir))
        except ValueError as exc:
            logger.warning("JSON load failed for file: %s", uri, exc_info=exc)

    self.classes.extend(ClassUtils.reduce_classes(collected))

process_classes()

Process the generated classes and write or print the output.

Source code in xsdata/codegen/transformer.py
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
def process_classes(self):
    """Process the generated classes and write or print the output."""
    main_total, inner_total = self.count_classes(self.classes)
    if not main_total:
        return

    logger.info(
        "Analyzer input: %d main and %d inner classes", main_total, inner_total
    )

    analyzed = self.analyze_classes(self.classes)
    main_total, inner_total = self.count_classes(analyzed)
    logger.info(
        "Analyzer output: %d main and %d inner classes", main_total, inner_total
    )

    writer = CodeWriter.from_config(self.config)
    with stopwatch(CodeWriter.__name__):
        writer.write(analyzed)

convert_schema(schema)

Convert a schema instance to codegen classes.

Process recursively any schema imports.

Parameters:

Name Type Description Default
schema Schema

The xsd schema instance

required
Source code in xsdata/codegen/transformer.py
290
291
292
293
294
295
296
297
298
299
300
301
302
def convert_schema(self, schema: Schema):
    """Convert a schema instance to codegen classes.

    Any included or imported sub-schemas are processed recursively first.

    Args:
        schema: The xsd schema instance
    """
    namespace = schema.target_namespace
    for sub_schema in schema.included():
        if sub_schema.location:
            self.process_schema(sub_schema.location, namespace)

    self.classes.extend(self.generate_classes(schema))

convert_definitions(definitions)

Convert a definitions instance to codegen classes.

Source code in xsdata/codegen/transformer.py
304
305
306
def convert_definitions(self, definitions: Definitions):
    """Convert a definitions instance to codegen classes."""
    mapped = DefinitionsMapper.map(definitions)
    self.classes.extend(mapped)

generate_classes(schema)

Convert the given schema instance to a list of classes.

Source code in xsdata/codegen/transformer.py
308
309
310
311
312
313
314
315
316
317
318
def generate_classes(self, schema: Schema) -> List[Class]:
    """Convert the given schema instance to a list of classes."""
    logger.info("Compiling schema %s", schema.location or "...")
    result = SchemaMapper.map(schema)

    main_total, inner_total = self.count_classes(result)
    if main_total:
        logger.info("Builder: %d main and %d inner classes", main_total, inner_total)

    return result

parse_schema(uri, namespace)

Parse the given URI and return the schema instance.

Parameters:

Name Type Description Default
uri str

The resource URI

required
namespace Optional[str]

The target namespace

required
Source code in xsdata/codegen/transformer.py
320
321
322
323
324
325
326
327
328
329
330
331
332
333
def parse_schema(self, uri: str, namespace: Optional[str]) -> Optional[Schema]:
    """Parse the given URI and return the schema instance.

    Args:
        uri: The resource URI
        namespace: The target namespace

    Returns:
        The parsed schema, or None if the resource could not be read.
    """
    content = self.load_resource(uri)
    if content is None:
        return None

    logger.info("Parsing schema %s", uri)
    return SchemaParser(target_namespace=namespace, location=uri).from_bytes(
        content, Schema
    )

parse_definitions(uri, namespace)

Parse recursively the given URI and return the definitions instance.

Parameters:

Name Type Description Default
uri str

The resource URI

required
namespace Optional[str]

The target namespace

required
Source code in xsdata/codegen/transformer.py
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
def parse_definitions(
    self,
    uri: str,
    namespace: Optional[str],
) -> Optional[Definitions]:
    """Parse recursively the given URI and return the definitions instance.

    Args:
        uri: The resource URI
        namespace: The target namespace
    """
    content = self.load_resource(uri)
    if content is None:
        return None

    parser = DefinitionsParser(target_namespace=namespace, location=uri)
    definitions = parser.from_bytes(content, Definitions)
    target_ns = definitions.target_namespace

    # Follow wsdl imports recursively; anything else is treated as a schema.
    for imported in definitions.imports:
        location = imported.location
        if not location:
            continue

        if location.endswith("wsdl"):
            nested = self.parse_definitions(location, target_ns)
            if nested:
                definitions.merge(nested)
        else:
            self.process_schema(location)

    return definitions

load_resource(uri)

Read and return the contents of the given URI.

Parameters:

Name Type Description Default
uri str

The resource URI

required

Returns:

Type Description
Optional[bytes]

The raw bytes content or None if the resource could not be read

Source code in xsdata/codegen/transformer.py
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
def load_resource(self, uri: str) -> Optional[bytes]:
    """Read and return the contents of the given URI.

    Args:
        uri: The resource URI

    Returns:
        The raw bytes content or None if the resource could not be read
    """
    if uri in self.processed:
        logger.debug("Skipping already processed: %s", uri)
        return None

    try:
        self.processed.append(uri)
        # Falsy preloaded content (None or empty bytes) falls through to a
        # fresh read of the uri.
        content = self.preloaded.pop(uri, None)
        if content:
            return content
        return opener.open(uri).read()  # nosec
    except OSError:
        logger.warning("Resource not found %s", uri)
        return None

classify_resource(uri)

Detect the resource type by the URI extension or the contents.

Parameters:

Name Type Description Default
uri str

The resource URI

required

Returns:

Type Description
int

The resource integer identifier.

Source code in xsdata/codegen/transformer.py
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
def classify_resource(self, uri: str) -> int:
    """Detect the resource type by the URI extension or the contents.

    Args:
        uri: The resource URI

    Returns:
        The resource integer identifier.
    """
    by_uri = next(
        (tp.id for tp in supported_types if tp.match_uri(uri)),
        None,
    )
    if by_uri is not None:
        return by_uri

    src = self.load_resource(uri)
    if src is None:
        return TYPE_UNKNOWN

    # Keep the content around so the upcoming real load doesn't hit the
    # uri twice, and forget the processed mark set by load_resource.
    self.preloaded[uri] = src
    self.processed.clear()
    text = src.decode("utf-8").strip()

    for tp in supported_types:
        if tp.match_content(text):
            return tp.id

    return TYPE_UNKNOWN

analyze_classes(classes)

Analyze the given class list and return the final list of classes.

Source code in xsdata/codegen/transformer.py
412
413
414
415
416
417
def analyze_classes(self, classes: List[Class]) -> List[Class]:
    """Analyze the given class list and return the final list of classes."""
    container = ClassContainer(config=self.config)
    container.extend(classes)
    container.process()

    return list(container)

count_classes(classes)

Return a tuple of counters for the main and inner classes.

Parameters:

Name Type Description Default
classes List[Class]

A list of class instances

required

Returns:

Type Description
Tuple[int, int]

A tuple of root, inner counters, e.g. (100, 5)

Source code in xsdata/codegen/transformer.py
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
def count_classes(self, classes: List[Class]) -> Tuple[int, int]:
    """Return a tuple of counters for the main and inner classes.

    Args:
        classes: A list of class instances

    Returns:
        A tuple of root, inner counters, e.g. (100, 5)
    """
    # sum() over each recursive (main, inner) tuple counts all nested
    # descendants of every class.
    nested_total = sum(sum(self.count_classes(obj.inner)) for obj in classes)
    return len(classes), nested_total

get_cache_file(uris) classmethod

Return the cache path for the raw mapped classes.

Parameters:

Name Type Description Default
uris List[str]

A list of URI strings

required

Returns:

Type Description
Path

A temporary file path instance

Source code in xsdata/codegen/transformer.py
435
436
437
438
439
440
441
442
443
444
445
446
447
@classmethod
def get_cache_file(cls, uris: List[str]) -> Path:
    """Return the cache path for the raw mapped classes.

    Args:
        uris: A list of URI strings

    Returns:
        A temporary file path instance
    """
    # md5 is used as a cache key only, not for security.
    digest = hashlib.md5("".join(uris).encode()).hexdigest()
    return Path(tempfile.gettempdir()) / f"xsdata.{__version__}.{digest}.cache"