Commits

Chris Mutel committed 222a298

Initial commit

Files changed (10)

File .hgignore

+syntax:glob
+*.so
+dist
+build
+MANIFEST
+*.pyc

File LICENSE.txt

+Copyright (c) 2012, Chris Mutel and ETH Zürich
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+
+Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+Neither the name of ETH Zürich nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

File MANIFEST

+# file GENERATED by distutils, do NOT edit
+LICENSE.txt
+README.txt
+setup.py
+bw2calc/__init__.py
+bw2calc/errors.py
+bw2calc/fallbacks.py
+bw2calc/lca.py
+bw2calc/merge_sparse_blocks.py
+bw2calc/monte_carlo.py

File MANIFEST.in

+include *.txt
+include bw2calc/*.py
+include bw2calc/indexer/*.py
+include bw2calc/indexer/_indexer.pyx
+include bw2calc/indexer/_indexer.c

File README.txt

+The calculation engine for the Brightway2 life cycle assessment framework.
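
A minimal usage sketch of the API introduced in this commit (the demand key and the method tuple below are hypothetical placeholders; a configured Brightway2 setup with processed databases and LCIA methods is assumed)::

    from bw2calc import LCA

    lca = LCA(
        demand={("ecoinvent", "some-process-key"): 1},       # functional unit
        method=("IPCC 2007", "climate change", "GWP 100a"),  # LCIA method tuple
    )
    lca.lci()    # build matrices and solve the inventory
    lca.lcia()   # characterize the inventory
    print(lca.score)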

File bw2calc/__init__.py

+from lca import LCA
+from monte_carlo import MonteCarloLCA

File bw2calc/fallbacks.py

+# -*- coding: utf-8 -*-
+from brightway2.utils import MAX_INT_32
+import numpy as np
+import itertools
+
+
+def indexer(array_from, array_to, mapping):
+    """
+The chosen algorithm is faster than using rankdata or simple iteration.
+Modifies numpy arrays in place.
+
+map(dict.get, data) seems faster, but doesn't work with missing values.
+
+Setup code to try new approaches::
+
+    from numpy import *
+    from time import time
+    a = random.random_integers(5000, size=100000)
+    indices = sorted(list(set([int(x) for x in a])))
+    mapping = dict(zip(indices, range(len(indices))))
+    b = zeros(a.shape[0])
+
+    """
+    array_to[:] = np.array([mapping.get(x, MAX_INT_32) for x in array_from])
+
+
+def dicter(array):
+    """
+Create a dictionary mapping the sorted, unique values in a numpy array to consecutive integer indices.
+
+Already more than fast enough. Setup code for other approaches::
+
+        %%timeit import numpy as np; array = np.random.randint(0, 5000, size=50000)
+        dict(zip(np.sort(np.unique(array)), itertools.count()))
+
+    """
+    return dict(zip(np.sort(np.unique(array)), itertools.count()))
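
For illustration, a small standalone worked example of what ``dicter`` and ``indexer`` produce (``MAX_INT_32`` is inlined here as a stand-in for the value imported from ``brightway2.utils``)::

    import itertools
    import numpy as np

    MAX_INT_32 = 4294967295  # stand-in sentinel for keys missing from the mapping

    ids = np.array([42, 7, 42, 99, 13])  # e.g. dataset ids from a processed array
    mapping = dict(zip(np.sort(np.unique(ids)), itertools.count()))
    # what dicter(ids) returns: {7: 0, 13: 1, 42: 2, 99: 3}

    rows = np.zeros(ids.shape, dtype=np.uint32)
    rows[:] = np.array([mapping.get(x, MAX_INT_32) for x in ids])
    # what indexer(ids, rows, mapping) does: rows == [2, 0, 2, 3, 1];
    # ids missing from the mapping would be masked with MAX_INT_32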

File bw2calc/lca.py

+# -*- coding: utf-8 -*-
+from __future__ import division
+from brightway2 import config as base_config
+from brightway2 import meta, methods, mapping
+from brightway2.proxies import OneDimensionalArrayProxy, \
+    CompressedSparseMatrixProxy
+from brightway2.utils import MAX_INT_32
+from fallbacks import dicter
+from scipy.sparse.linalg import factorized, spsolve
+from scipy import sparse
+import numpy as np
+import os
+try:
+    import cPickle as pickle
+except ImportError:
+    import pickle
+try:
+    from bw2speedups import indexer
+except ImportError:
+    from fallbacks import indexer
+
+
+class LCA(object):
+    def __init__(self, demand, method=None, config=None):
+        self.config = config or base_config
+        self.demand = demand
+        self.method = method
+        self.databases = self.get_databases()
+
+    def get_databases(self):
+        """Get list of databases for functional unit"""
+        return set.union(*[set(meta[key[0]]["depends"] + [key[0]]) for key \
+            in self.demand])
+
+    def load_databases(self):
+        params = np.hstack([pickle.load(open(os.path.join(
+            self.config.dir, "processed", "%s.pickle" % name), "rb")
+            ) for name in self.databases])
+        # Technosphere
+        self.tech_params = params[np.where(params['technosphere'] == True)]
+        self.bio_params = params[np.where(params['technosphere'] == False)]
+        self.technosphere_dict = self.build_dictionary(np.hstack((
+            self.tech_params['input'], self.tech_params['output'],
+            self.bio_params['output'])))
+        self.add_matrix_indices(self.tech_params['input'], self.tech_params['row'],
+            self.technosphere_dict)
+        self.add_matrix_indices(self.tech_params['output'], self.tech_params['col'],
+            self.technosphere_dict)
+        # Biosphere
+        self.biosphere_dict = self.build_dictionary(self.bio_params['input'])
+        self.add_matrix_indices(self.bio_params['input'], self.bio_params['row'],
+            self.biosphere_dict)
+        self.add_matrix_indices(self.bio_params['output'], self.bio_params['col'],
+            self.technosphere_dict)
+
+    def load_method(self):
+        params = pickle.load(open(os.path.join(self.config.dir, "processed",
+            "%s.pickle" % methods[self.method]['abbreviation']), "rb"))
+        self.add_matrix_indices(params['flow'], params['index'],
+            self.biosphere_dict)
+        # Eliminate references to biosphere flows that don't appear in this
+        # assessment; they are masked with MAX_INT_32 values
+        self.cf_params = params[np.where(params['index'] != MAX_INT_32)]
+
+    def build_technosphere_matrix(self, vector=None):
+        vector = self.tech_params['amount'] if vector is None else vector
+        count = len(self.technosphere_dict)
+        indices = range(count)
+        # Add ones along the diagonal
+        data = np.hstack((-1 * vector, np.ones((count,))))
+        rows = np.hstack((self.tech_params['row'], indices))
+        cols = np.hstack((self.tech_params['col'], indices))
+        # coo_matrix construction is coo_matrix((values, (rows, cols)),
+        # (row_count, col_count))
+        self.technosphere_matrix = CompressedSparseMatrixProxy(
+            sparse.coo_matrix((data, (rows, cols)), (count, count)).tocsr(),
+            self.technosphere_dict, self.technosphere_dict)
+
+    def build_biosphere_matrix(self, vector=None):
+        vector = self.bio_params['amount'] if vector is None else vector
+        row_count = len(self.biosphere_dict)
+        col_count = len(self.technosphere_dict)
+        # coo_matrix construction is coo_matrix((values, (rows, cols)),
+        # (row_count, col_count))
+        self.biosphere_matrix = CompressedSparseMatrixProxy(
+            sparse.coo_matrix((vector, (self.bio_params['row'],
+            self.bio_params['col'])), (row_count, col_count)).tocsr(),
+            self.biosphere_dict, self.technosphere_dict)
+
+    def decompose_technosphere(self):
+        self.solver = factorized(self.technosphere_matrix.data.tocsc())
+
+    def build_demand_array(self, demand=None):
+        demand = demand or self.demand
+        self.demand_array = OneDimensionalArrayProxy(
+            np.zeros(len(self.technosphere_dict)),
+            self.technosphere_dict)
+        for key in demand:
+            self.demand_array[mapping[key]] = demand[key]
+
+    def build_characterization_matrix(self, vector=None):
+        vector = self.cf_params['amount'] if vector is None else vector
+        count = len(self.biosphere_dict)
+        self.characterization_matrix = CompressedSparseMatrixProxy(
+            sparse.coo_matrix((vector, (self.cf_params['index'], self.cf_params['index'])),
+            (count, count)).tocsr(),
+            self.biosphere_dict, self.biosphere_dict)
+
+    def build_dictionary(self, array):
+        """Build a dictionary from the sorted, unique elements of an array"""
+        return dicter(array)
+
+    def add_matrix_indices(self, array_from, array_to, mapping):
+        """Map ``array_from`` keys to ``array_to`` values using ``mapping``"""
+        indexer(array_from, array_to, mapping)
+
+    def solve_linear_system(self):
+        """
+Master solution function for the linear system Ax = b.
+
+This is kept as a separate method so that it can be easily subclassed or
+replaced. It is normally called without arguments and uses the technosphere
+matrix and demand array already stored on the instance.
+
+The default uses UMFPACK, and stores the factorized matrix so that
+subsequent solves are quick. "To most numerical analysts, matrix inversion
+is a sin." - Nicholas Higham, Accuracy and Stability of Numerical
+Algorithms, 2002, p. 260.
+        """
+        if hasattr(self, "solver"):
+            return self.solver(self.demand_array.data)
+        else:
+            return spsolve(
+                self.technosphere_matrix.data,
+                self.demand_array.data)
+
+    def lci(self, factorize=False):
+        """Life cycle inventory"""
+        self.load_databases()
+        self.build_technosphere_matrix()
+        self.build_biosphere_matrix()
+        self.build_demand_array()
+        if factorize:
+            self.decompose_technosphere()
+        self.lci_calculation()
+
+    def lci_calculation(self):
+        self.supply_array = OneDimensionalArrayProxy(
+            self.solve_linear_system(),
+            self.technosphere_dict)
+        count = len(self.technosphere_dict)
+        self.inventory = CompressedSparseMatrixProxy(
+            self.biosphere_matrix.data * \
+            sparse.spdiags([self.supply_array.data], [0], count, count),
+            self.biosphere_dict, self.technosphere_dict)
+
+    def redo_lci(self, demand):
+        """Redo LCI with same databases but different demand"""
+        assert hasattr(self, "inventory"), "Must do lci first"
+        self.build_demand_array(demand)
+        self.lci_calculation()
+
+    def lcia(self):
+        """Life cycle impact assessment"""
+        assert hasattr(self, "inventory"), "Must do lci first"
+        assert self.method, "Must specify a method to perform LCIA"
+        self.load_method()
+        self.build_characterization_matrix()
+        self.lcia_calculation()
+
+    def lcia_calculation(self):
+        self.characterized_inventory = CompressedSparseMatrixProxy(
+            self.characterization_matrix.data * self.inventory.data,
+            self.biosphere_dict, self.technosphere_dict)
+
+    def redo_lcia(self, demand=None):
+        assert hasattr(self, "characterized_inventory"), "Must do LCIA first"
+        if demand:
+            self.redo_lci(demand)
+        self.lcia_calculation()
+
+    @property
+    def score(self):
+        assert hasattr(self, "characterized_inventory"), "Must do LCIA first"
+        return self.characterized_inventory.sum()
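
A self-contained sketch of the linear algebra behind ``build_technosphere_matrix`` and ``solve_linear_system``, using a made-up two-process system (not data from any real database)::

    import numpy as np
    from scipy import sparse
    from scipy.sparse.linalg import factorized, spsolve

    # Process 1 consumes 0.5 units of process 0's product per unit of output;
    # inputs are negated and ones are placed on the diagonal.
    rows = np.array([0, 0, 1])
    cols = np.array([1, 0, 1])
    data = np.array([-0.5, 1.0, 1.0])
    A = sparse.coo_matrix((data, (rows, cols)), (2, 2)).tocsr()

    demand = np.array([0.0, 1.0])        # functional unit: 1 unit from process 1

    supply = spsolve(A, demand)          # one-off solve; supply == [0.5, 1.0]
    solve = factorized(A.tocsc())        # reusable LU factorization
    assert np.allclose(solve(demand), supply)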

File bw2calc/monte_carlo.py

+from lca import LCA
+from scipy.sparse.linalg import iterative, spsolve
+from stats_toolkit.random import MCRandomNumberGenerator
+
+
+class MonteCarloLCA(LCA):
+    def __init__(self, activity, method=None, iter_solver=iterative.cgs,
+            seed=None, *args, **kwargs):
+        super(MonteCarloLCA, self).__init__(activity, method=method, *args,
+            **kwargs)
+        self.seed = seed
+        self.iter_solver = iter_solver
+        self.guess = None
+        self.load_databases()
+        self.load_method()
+        self.tech_rng = MCRandomNumberGenerator(self.tech_params, seed=seed)
+        self.bio_rng = MCRandomNumberGenerator(self.bio_params, seed=seed)
+        self.cf_rng = MCRandomNumberGenerator(self.cf_params, seed=seed)
+
+    def next(self):
+        self.build_technosphere_matrix(self.tech_rng.next())
+        self.build_biosphere_matrix(self.bio_rng.next())
+        self.build_characterization_matrix(self.cf_rng.next())
+
+        if not hasattr(self, "demand_array"):
+            self.build_demand_array()
+
+        self.lci_calculation()
+        self.lcia_calculation()
+        return self.score
+
+    def solve_linear_system(self):
+        if not self.iter_solver or self.guess is None:
+            self.guess = spsolve(
+                self.technosphere_matrix.data,
+                self.demand_array.data)
+            return self.guess
+        else:
+            solution, status = self.iter_solver(
+                self.technosphere_matrix.data,
+                self.demand_array.data,
+                x0=self.guess)
+            if status != 0:
+                raise RuntimeError(
+                    "Iterative solver failed (exit status %s)" % status)
+            return solution
+
+    def __iter__(self):
+        # Return self so the instance acts as its own (infinite) iterator;
+        # each call to next() draws new samples and recalculates the score.
+        return self
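
A hedged sketch of driving the Monte Carlo class (the demand key and method tuple are again hypothetical placeholders, and a configured Brightway2 setup is assumed)::

    from bw2calc import MonteCarloLCA

    mc = MonteCarloLCA(
        {("ecoinvent", "some-process-key"): 1},              # functional unit
        method=("IPCC 2007", "climate change", "GWP 100a"),  # LCIA method
        seed=42,
    )
    scores = []
    for _ in range(100):
        mc.next()                 # draw new samples, rebuild matrices, recalculate
        scores.append(mc.score)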

File setup.py

+from distutils.core import setup
+
+setup(
+  name='bw2calc',
+  version="0.1",
+  packages=["bw2calc"],
+  author="Chris Mutel",
+  author_email="cmutel@gmail.com",
+  license=open('LICENSE.txt').read(),
+  long_description=open('README.txt').read(),
+)