
Journals and Conference Proceedings

274. Multi-Level Timing Simulation on GPUs
Schneider, E., Kochte, M.A. and Wunderlich, H.-J.
to appear in Proceedings of the 23rd Asia and South Pacific Design Automation Conference (ASP-DAC'18), Jeju Island, Korea, 22-25 January 2018, pp. 1-6
2018
 
BibTeX:
@inproceedings{SchneKW2018,
  author = {Schneider, Eric and Kochte, Michael A. and Wunderlich, Hans-Joachim},
  title = {{Multi-Level Timing Simulation on GPUs}},
  booktitle = {to appear in Proceedings of the 23rd Asia and South Pacific Design Automation Conference (ASP-DAC'18)},
  year = {2018},
  pages = {1--6}
}
273. Structure-oriented Test of Reconfigurable Scan Networks
Ull, D., Kochte, M.A. and Wunderlich, H.-J.
to appear in Proceedings of the 26th IEEE Asian Test Symposium (ATS'17), Taipei, Taiwan, 27-30 November 2017
2017
 
BibTeX:
@inproceedings{UllKW2017,
  author = {Ull, Dominik and Kochte, Michael A. and Wunderlich, Hans-Joachim},
  title = {{Structure-oriented Test of Reconfigurable Scan Networks}},
  booktitle = {to appear in Proceedings of the 26th IEEE Asian Test Symposium (ATS'17)},
  year = {2017}
}
272. Analysis and Mitigation of IR-Drop Induced Scan Shift-Errors
Holst, S., Schneider, E., Kawagoe, K., Kochte, M.A., Miyase, K., Wunderlich, H.-J., Kajihara, S. and Wen, X.
to appear in Proceedings of the IEEE International Test Conference (ITC'17), Fort Worth, Texas, USA, 31 October-2 November 2017
2017
 
BibTeX:
@inproceedings{HolstSKKMWKW2017,
  author = {Holst, Stefan and Schneider, Eric and Kawagoe, Koshi and Kochte, Michael A. and Miyase, Kohei and Wunderlich, Hans-Joachim and Kajihara, Seiji and Wen, Xiaoqing},
  title = {{Analysis and Mitigation of IR-Drop Induced Scan Shift-Errors}},
  booktitle = {to appear in Proceedings of the IEEE International Test Conference (ITC'17)},
  year = {2017}
}
271. Self-Test and Diagnosis for Self-Aware Systems
Survey for Design & Test Special Issue on “Self-Awareness in SoCs”
Kochte, M.A. and Wunderlich, H.-J.
IEEE Design & Test, 13 October 2017
2017
DOI PDF 
Keywords: Self-test, diagnosis, health monitoring, fault management, on-chip infrastructure
Abstract: Self-awareness allows autonomous systems the dynamic adaptation to changing states of the hardware platform and the optimal usage of available computing resources. This demands concurrent, periodical, or on-demand monitoring and testing of the hardware structures to detect and classify deviations from the nominal behavior and appropriate reactions. This survey discusses suitable self-test, self-checking, and self-diagnosis methods for the realization of self-awareness and presents two case studies in which such methods are applied at different levels.
BibTeX:
@article{KochtW2017,
  author = {Kochte, Michael A. and Wunderlich, Hans-Joachim},
  title = {{Self-Test and Diagnosis for Self-Aware Systems}},
  journal = {IEEE Design \& Test},
  year = {2017},
  keywords = {Self-test, diagnosis, health monitoring, fault management, on-chip infrastructure},
  abstract = {Self-awareness allows autonomous systems the dynamic adaptation to changing states of the hardware platform and the optimal usage of available computing resources. This demands concurrent, periodical, or on-demand monitoring and testing of the hardware structures to detect and classify deviations from the nominal behavior and appropriate reactions. This survey discusses suitable self-test, self-checking, and self-diagnosis methods for the realization of self-awareness and presents two case studies in which such methods are applied at different levels.},
  doi = {http://dx.doi.org/10.1109/MDAT.2017.2762903},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2017/DT_KochtW2017.pdf}
}
270. Trustworthy Reconfigurable Access to On-Chip Infrastructure
Kochte, M.A., Baranowski, R. and Wunderlich, H.-J.
Proceedings of the 1st International Test Conference in Asia (ITC-Asia'17), Taipei, Taiwan, 13-15 September 2017
2017
 
BibTeX:
@inproceedings{KochtBW2017,
  author = {Kochte, Michael A. and Baranowski, Rafal and Wunderlich, Hans-Joachim},
  title = {{Trustworthy Reconfigurable Access to On-Chip Infrastructure}},
  booktitle = {Proceedings of the 1st International Test Conference in Asia (ITC-Asia'17)},
  year = {2017}
}
269. Energy-efficient and Error-resilient Iterative Solvers for Approximate Computing
Schöll, A., Braun, C. and Wunderlich, H.-J.
Proceedings of the 23rd IEEE International Symposium on On-Line Testing and Robust System Design (IOLTS'17), Thessaloniki, Greece, 3-5 July 2017, pp. 237-239
2017
DOI PDF 
Keywords: Approximate Computing, Energy-efficiency, Fault Tolerance, Quality Monitoring
Abstract: Iterative solvers like the Preconditioned Conjugate Gradient (PCG) method are widely-used in compute-intensive domains including science and engineering that often impose tight accuracy demands on computational results. At the same time, the error resilience of such solvers may change in the course of the iterations, which requires careful adaption of the induced approximation errors to reduce the energy demand while avoiding unacceptable results. A novel adaptive method is presented that enables iterative Preconditioned Conjugate Gradient (PCG) solvers on Approximate Computing hardware with high energy efficiency while still providing correct results. The method controls the underlying precision at runtime using a highly efficient fault tolerance technique that monitors the induced error and the quality of intermediate computational results.
BibTeX:
@inproceedings{SchoeBW2017,
  author = {Schöll, Alexander and Braun, Claus and Wunderlich, Hans-Joachim},
  title = {{Energy-efficient and Error-resilient Iterative Solvers for Approximate Computing}},
  booktitle = {Proceedings of the 23rd IEEE International Symposium on On-Line Testing and Robust System Design (IOLTS'17)},
  year = {2017},
  pages = {237--239},
  keywords = {Approximate Computing, Energy-efficiency, Fault Tolerance, Quality Monitoring},
  abstract = {Iterative solvers like the Preconditioned Conjugate Gradient (PCG) method are widely-used in compute-intensive domains including science and engineering that often impose tight accuracy demands on computational results. At the same time, the error resilience of such solvers may change in the course of the iterations, which requires careful adaption of the induced approximation errors to reduce the energy demand while avoiding unacceptable results. A novel adaptive method is presented that enables iterative Preconditioned Conjugate Gradient (PCG) solvers on Approximate Computing hardware with high energy efficiency while still providing correct results. The method controls the underlying precision at runtime using a highly efficient fault tolerance technique that monitors the induced error and the quality of intermediate computational results.},
  doi = {http://dx.doi.org/10.1109/IOLTS.2017.8046244},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2017/IOLTS_SchoeBW2017.pdf}
}
268. Aging Resilience and Fault Tolerance in Runtime Reconfigurable Architectures
Zhang, H., Bauer, L., Kochte, M.A., Schneider, E., Wunderlich, H.-J. and Henkel, J.
IEEE Transactions on Computers
Vol. 66(6), 1 June 2017, pp. 957-970
2017
DOI PDF 
Keywords: Runtime reconfiguration, aging mitigation, fault-tolerance, resilience, graceful degradation, FPGA
Abstract: Runtime reconfigurable architectures based on Field-Programmable Gate Arrays (FPGAs) allow area- and power-efficient acceleration of complex applications. However, being manufactured in latest semiconductor process technologies, FPGAs are increasingly prone to aging effects, which reduce the reliability and lifetime of such systems. Aging mitigation and fault tolerance techniques for the reconfigurable fabric become essential to realize dependable reconfigurable architectures. This article presents an accelerator diversification method that creates multiple configurations for runtime reconfigurable accelerators that are diversified in their usage of Configurable Logic Blocks (CLBs). In particular, it creates a minimal number of configurations such that all single-CLB and some multi-CLB faults can be tolerated. For each fault we ensure that there is at least one configuration that does not use that CLB.
Secondly, a novel runtime accelerator placement algorithm is presented that exploits the diversity in resource usage of these configurations to balance the stress imposed by executions of the accelerators on the reconfigurable fabric. By tracking the stress due to accelerator usage at runtime, the stress is balanced both within a reconfigurable region as well as over all reconfigurable regions of the system. The accelerator placement algorithm also considers faulty CLBs in the regions and selects the appropriate configuration such that the system maintains a high performance in presence of multiple permanent faults.
Experimental results demonstrate that our methods deliver up to 3.7x higher performance in presence of faults at marginal runtime costs and 1.6x higher MTTF than state-of-the-art aging mitigation methods.
BibTeX:
@article{ZhangBKSWH2017,
  author = {Zhang, Hongyan and Bauer, Lars and Kochte, Michael A. and Schneider, Eric and Wunderlich, Hans-Joachim and Henkel, Jörg},
  title = {{Aging Resilience and Fault Tolerance in Runtime Reconfigurable Architectures}},
  journal = {IEEE Transactions on Computers},
  year = {2017},
  volume = {66},
  number = {6},
  pages = {957--970},
  keywords = {Runtime reconfiguration, aging mitigation, fault-tolerance, resilience, graceful degradation, FPGA},
  abstract = {Runtime reconfigurable architectures based on Field-Programmable Gate Arrays (FPGAs) allow area- and power-efficient acceleration of complex applications. However, being manufactured in latest semiconductor process technologies, FPGAs are increasingly prone to aging effects, which reduce the reliability and lifetime of such systems. Aging mitigation and fault tolerance techniques for the reconfigurable fabric become essential to realize dependable reconfigurable architectures. This article presents an accelerator diversification method that creates multiple configurations for runtime reconfigurable accelerators that are diversified in their usage of Configurable Logic Blocks (CLBs). In particular, it creates a minimal number of configurations such that all single-CLB and some multi-CLB faults can be tolerated. For each fault we ensure that there is at least one configuration that does not use that CLB.
Secondly, a novel runtime accelerator placement algorithm is presented that exploits the diversity in resource usage of these configurations to balance the stress imposed by executions of the accelerators on the reconfigurable fabric. By tracking the stress due to accelerator usage at runtime, the stress is balanced both within a reconfigurable region as well as over all reconfigurable regions of the system. The accelerator placement algorithm also considers faulty CLBs in the regions and selects the appropriate configuration such that the system maintains a high performance in presence of multiple permanent faults.
Experimental results demonstrate that our methods deliver up to 3.7x higher performance in presence of faults at marginal runtime costs and 1.6x higher MTTF than state-of-the-art aging mitigation methods.},
  doi = {http://dx.doi.org/10.1109/TC.2016.2616405},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2017/TC_ZhangBKSWH2017.pdf}
}
267. Specification and Verification of Security in Reconfigurable Scan Networks
Kochte, M.A., Sauer, M., Rodríguez Gómez, L., Raiola, P., Becker, B. and Wunderlich, H.-J.
Proceedings of the 22nd IEEE European Test Symposium (ETS'17), Limassol, Cyprus, 22-26 May 2017, pp. 1-6
2017
DOI PDF 
Keywords: Access Control, On-Chip Infrastructure, Reconfigurable Scan Network, Verification, Side-Channel Attack, IEEE Std 1687, IJTAG, Hardware Security
Abstract: A large amount of on-chip infrastructure, such as design-for-test, debug, monitoring, or calibration, is required for the efficient manufacturing, debug, and operation of complex hardware systems. The access to such infrastructure poses severe system safety and security threats since it may constitute a side-channel exposing internal state, sensitive data, or IP to attackers. Reconfigurable scan networks (RSNs) have been proposed as a scalable and flexible scan-based access mechanism to on-chip infrastructure. The increasing number and variety of integrated infrastructure as well as diverse access constraints over the system lifetime demand for systematic methods for the specification and formal verification of access protection and security properties in RSNs. This work presents a novel method to specify and verify fine-grained access permissions and restrictions to instruments attached to an RSN. The permissions and restrictions are transformed into predicates that are added to a formal model of a given RSN to prove which access properties hold or do not hold.
BibTeX:
@inproceedings{KochtSRRBW2017,
  author = {Kochte, Michael A. and Sauer, Matthias and Rodríguez Gómez, Laura and Raiola, Pascal and Becker, Bernd and Wunderlich, Hans-Joachim},
  title = {{Specification and Verification of Security in Reconfigurable Scan Networks}},
  booktitle = {Proceedings of the 22nd IEEE European Test Symposium (ETS'17)},
  year = {2017},
  pages = {1--6},
  keywords = {Access Control, On-Chip Infrastructure, Reconfigurable Scan Network, Verification, Side-Channel Attack, IEEE Std 1687, IJTAG, Hardware Security},
  abstract = {A large amount of on-chip infrastructure, such as design-for-test, debug, monitoring, or calibration, is required for the efficient manufacturing, debug, and operation of complex hardware systems. The access to such infrastructure poses severe system safety and security threats since it may constitute a side-channel exposing internal state, sensitive data, or IP to attackers. Reconfigurable scan networks (RSNs) have been proposed as a scalable and flexible scan-based access mechanism to on-chip infrastructure. The increasing number and variety of integrated infrastructure as well as diverse access constraints over the system lifetime demand for systematic methods for the specification and formal verification of access protection and security properties in RSNs. This work presents a novel method to specify and verify fine-grained access permissions and restrictions to instruments attached to an RSN. The permissions and restrictions are transformed into predicates that are added to a formal model of a given RSN to prove which access properties hold or do not hold.},
  doi = {http://dx.doi.org/10.1109/ETS.2017.7968247},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2017/ETS_KochtSRRBW2017.pdf}
}
266. Probabilistic Sensitization Analysis for Variation-Aware Path Delay Fault Test Evaluation
Wagner, M. and Wunderlich, H.-J.
Proceedings of the 22nd IEEE European Test Symposium (ETS'17), Limassol, Cyprus, 22-26 May 2017, pp. 1-6
2017
DOI PDF 
Keywords: delay test, process variations, delay test quality
Abstract: With the ever increasing process variability in recent technology nodes, path delay fault testing of digital integrated circuits has become a major challenge. A randomly chosen long path often has no robust test and many of the existing non-robust tests are likely invalidated by process variations. To generate path delay fault tests that are more tolerant towards process variations, the delay test generation must evaluate different non-robust tests and only those tests that sensitize the target path with a sufficiently high probability in presence of process variations must be selected. This requires a huge number of probability computations for a large number of target paths and makes the development of very efficient approximation algorithms mandatory for any practical application. In this paper, a novel and efficient probabilistic sensitization analysis is presented which is used to extract a small subcircuit for a given test vector-pair. The probability that a target path is sensitized by the vector-pair is computed efficiently and without significant error by a Monte-Carlo simulation of the subcircuit.
BibTeX:
@inproceedings{WagneW2017,
  author = {Wagner, Marcus and Wunderlich, Hans-Joachim},
  title = {{Probabilistic Sensitization Analysis for Variation-Aware Path Delay Fault Test Evaluation}},
  booktitle = {Proceedings of the 22nd IEEE European Test Symposium (ETS'17)},
  year = {2017},
  pages = {1--6},
  keywords = {delay test, process variations, delay test quality},
  abstract = {With the ever increasing process variability in recent technology nodes, path delay fault testing of digital integrated circuits has become a major challenge. A randomly chosen long path often has no robust test and many of the existing non-robust tests are likely invalidated by process variations. To generate path delay fault tests that are more tolerant towards process variations, the delay test generation must evaluate different non-robust tests and only those tests that sensitize the target path with a sufficiently high probability in presence of process variations must be selected. This requires a huge number of probability computations for a large number of target paths and makes the development of very efficient approximation algorithms mandatory for any practical application. In this paper, a novel and efficient probabilistic sensitization analysis is presented which is used to extract a small subcircuit for a given test vector-pair. The probability that a target path is sensitized by the vector-pair is computed efficiently and without significant error by a Monte-Carlo simulation of the subcircuit.},
  doi = {http://dx.doi.org/10.1109/ETS.2017.7968226},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2017/ETS_WagneW2017.pdf}
}
265. Multi-Layer Diagnosis for Fault-Tolerant Networks-on-Chip
Schley, G., Dalirsani, A., Eggenberger, M., Hatami, N., Wunderlich, H.-J. and Radetzki, M.
IEEE Transactions on Computers
Vol. 66(5), 1 May 2017, pp. 848-861
2017
DOI PDF 
Keywords: Networks-on-Chip, NoC, Diagnosis, Performance, Multi-layer, Design Space Exploration
Abstract: In order to tolerate faults that emerge in operating Networks-on-Chip, diagnosis techniques are employed for fault detection and localization. On various network layers, diverse diagnosis methods can be employed which differ in terms of their impact on network performance (e.g. by operating concurrently vs. pre-empting regular network operation) and the quality of diagnostic results. In this contribution, we show how diagnosis techniques of different network layers of a Network-on-Chip can be combined into multi-layer solutions. We present the cross-layer information flow used for the interaction between the layers and show the resulting benefit of the combination compared to layer-specific diagnosis. For evaluation, we investigate the diagnosis quality and the impact on system performance to explore the entire design space of layer-specific techniques and their multi-layer combinations. We identify pareto-optimal combinations that offer an increase of system performance by a factor of four compared to the single-layer diagnosis.
BibTeX:
@article{SchleDEHWR2017,
  author = {Schley, Gert and Dalirsani, Atefe and Eggenberger, Marcus and Hatami, Nadereh and Wunderlich, Hans-Joachim and Radetzki, Martin},
  title = {{Multi-Layer Diagnosis for Fault-Tolerant Networks-on-Chip}},
  journal = {IEEE Transactions on Computers},
  year = {2017},
  volume = {66},
  number = {5},
  pages = {848--861},
  keywords = {Networks-on-Chip, NoC, Diagnosis, Performance, Multi-layer, Design Space Exploration},
  abstract = {In order to tolerate faults that emerge in operating Networks-on-Chip, diagnosis techniques are employed for fault detection and localization. On various network layers, diverse diagnosis methods can be employed which differ in terms of their impact on network performance (e.g. by operating concurrently vs. pre-empting regular network operation) and the quality of diagnostic results. In this contribution, we show how diagnosis techniques of different network layers of a Network-on-Chip can be combined into multi-layer solutions. We present the cross-layer information flow used for the interaction between the layers and show the resulting benefit of the combination compared to layer-specific diagnosis. For evaluation, we investigate the diagnosis quality and the impact on system performance to explore the entire design space of layer-specific techniques and their multi-layer combinations. We identify pareto-optimal combinations that offer an increase of system performance by a factor of four compared to the single-layer diagnosis. },
  doi = {http://dx.doi.org/10.1109/TC.2016.2628058},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2017/TC_SchleDEHWR2017.pdf}
}
264. GPU-Accelerated Simulation of Small Delay Faults
Schneider, E., Kochte, M.A., Holst, S., Wen, X. and Wunderlich, H.-J.
IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems (TCAD)
Vol. 36(5), May 2017, pp. 829-841
2017
DOI PDF 
Keywords: Circuit faults, Computational modeling, Delays, Instruction sets, Integrated circuit modeling, Logic gates, Fault simulation, graphics processing unit (GPU), parallel, process variation, small gate delay faults, timing-accurate, waveform
Abstract: Delay fault simulation is an essential task during test pattern generation and reliability assessment of electronic circuits. With the high sensitivity of current nano-scale designs towards even smallest delay deviations, the simulation of small gate delay faults has become extremely important. Since these faults have a subtle impact on the timing behavior, traditional fault simulation approaches based on abstract timing models are not sufficient. Furthermore, the detection of these faults is compromised by the ubiquitous variations in the manufacturing processes, which causes the actual fault coverage to vary from circuit instance to circuit instance, and makes the use of timing accurate methods mandatory. However, the application of timing accurate techniques quickly becomes infeasible for larger designs due to excessive computational requirements. In this work, we present a method for fast and waveform-accurate simulation of small delay faults on graphics processing units with exceptional computational performance. By exploiting multiple dimensions of parallelism from gates, faults, waveforms and circuit instances, the proposed approach allows for timing-accurate and exhaustive small delay fault simulation under process variation for designs with millions of gates.
BibTeX:
@article{SchneKHWW2016,
  author = {Schneider, Eric and Kochte, Michael A. and Holst, Stefan and Wen, Xiaoqing and Wunderlich, Hans-Joachim},
  title = {{GPU-Accelerated Simulation of Small Delay Faults}},
  journal = {IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems (TCAD)},
  year = {2017},
  volume = {36},
  number = {5},
  pages = {829--841},
  keywords = {Circuit faults, Computational modeling, Delays, Instruction sets, Integrated circuit modeling, Logic gates, Fault simulation, graphics processing unit (GPU), parallel, process variation, small gate delay faults, timing-accurate, waveform},
  abstract = {Delay fault simulation is an essential task during test pattern generation and reliability assessment of electronic circuits. With the high sensitivity of current nano-scale designs towards even smallest delay deviations, the simulation of small gate delay faults has become extremely important. Since these faults have a subtle impact on the timing behavior, traditional fault simulation approaches based on abstract timing models are not sufficient. Furthermore, the detection of these faults is compromised by the ubiquitous variations in the manufacturing processes, which causes the actual fault coverage to vary from circuit instance to circuit instance, and makes the use of timing accurate methods mandatory. However, the application of timing accurate techniques quickly becomes infeasible for larger designs due to excessive computational requirements. In this work, we present a method for fast and waveform-accurate simulation of small delay faults on graphics processing units with exceptional computational performance. By exploiting multiple dimensions of parallelism from gates, faults, waveforms and circuit instances, the proposed approach allows for timing-accurate and exhaustive small delay fault simulation under process variation for designs with millions of gates.},
  doi = {http://dx.doi.org/10.1109/TCAD.2016.2598560},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2016/TCAD_SchneKHWW2016.pdf}
}
263. Special Session on Early Life Failures
Deshmukh, J., Kunz, W., Wunderlich, H.-J. and Hellebrand, S.
Proceedings of the 35th VLSI Test Symposium (VTS'17), Caesars Palace, Las Vegas, Nevada, USA, 9-12 April 2017
2017
DOI PDF 
Abstract: In recent years early life failures have caused several product recalls in semiconductor and automotive industries associated with a loss of billions of dollars. They can be traced back to various root-causes. In embedded or cyber-physical systems, the interaction with the environment and the behavior of the hardware/software interface are hard to predict, which may lead to unforeseen failures. In addition to that, defects that have escaped manufacturing test or “weak” devices that cannot stand operational stress may for example cause unexpected hardware problems in the early life of a system. The special session focuses on the first aspect. The first contribution discusses how the interaction with the environment in cyber-physical systems can be appropriately modeled and tested. The second presentation then deals with a cross-layer approach identifying problems at the hardware/software interface which cannot be compensated by the application and must therefore be targeted by specific tests.
BibTeX:
@inproceedings{DeshmKWH2017,
  author = {Deshmukh, Jyotirmoy and Kunz, Wolfgang and Wunderlich, Hans-Joachim and Hellebrand, Sybille},
  title = {{Special Session on Early Life Failures}},
  booktitle = {Proceedings of the 35th VLSI Test Symposium (VTS'17)},
  year = {2017},
  abstract = {In recent years early life failures have caused several product recalls in semiconductor and automotive industries associated with a loss of billions of dollars. They can be traced back to various root-causes. In embedded or cyber-physical systems, the interaction with the environment and the behavior of the hardware/software interface are hard to predict, which may lead to unforeseen failures. In addition to that, defects that have escaped manufacturing test or “weak” devices that cannot stand operational stress may for example cause unexpected hardware problems in the early life of a system. The special session focuses on the first aspect. The first contribution discusses how the interaction with the environment in cyber-physical systems can be appropriately modeled and tested. The second presentation then deals with a cross-layer approach identifying problems at the hardware/software interface which cannot be compensated by the application and must therefore be targeted by specific tests.},
  doi = {http://dx.doi.org/10.1109/VTS.2017.7928933},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2017/VTS_DeshmKWH2017.pdf}
}
262. Aging Monitor Reuse for Small Delay Fault Testing
Liu, C., Kochte, M.A. and Wunderlich, H.-J.
Proceedings of the 35th VLSI Test Symposium (VTS'17), Caesars Palace, Las Vegas, Nevada, USA, 9-12 April 2017
2017
DOI PDF 
Keywords: Delay monitoring, delay test, faster-than-at-speed test, stability checker, small delay fault, ATPG
Abstract: Small delay faults receive more and more attention, since they may indicate a circuit reliability marginality even if they do not violate the timing at the time of production. At-speed test and faster-than-at-speed test (FAST) are rather expensive tasks to test for such faults. The paper at hand avoids complex on-chip structures or expensive high-speed ATE for test response evaluation, if aging monitors which are integrated into the device under test anyway are reused. The main challenge in reusing aging monitors for FAST consists in possible false alerts at higher frequencies. While a certain test vector pair makes a delay fault observable at one monitor, it may also exceed the time slack in the fault free case at a different monitor which has to be masked. Therefore, a multidimensional optimizing problem has to be solved for minimizing the masking overhead and the number of test vectors while maximizing delay fault coverage.
BibTeX:
@inproceedings{LiuKW2017,
  author = {Liu, Chang and Kochte, Michael A. and Wunderlich, Hans-Joachim},
  title = {{Aging Monitor Reuse for Small Delay Fault Testing}},
  booktitle = {Proceedings of the 35th VLSI Test Symposium (VTS'17)},
  year = {2017},
  keywords = {Delay monitoring, delay test, faster-than-at-speed test, stability checker, small delay fault, ATPG},
  abstract = {Small delay faults receive more and more attention, since they may indicate a circuit reliability marginality even if they do not violate the timing at the time of production. At-speed test and faster-than-at-speed test (FAST) are rather expensive tasks to test for such faults. The paper at hand avoids complex on-chip structures or expensive high-speed ATE for test response evaluation, if aging monitors which are integrated into the device under test anyway are reused. The main challenge in reusing aging monitors for FAST consists in possible false alerts at higher frequencies. While a certain test vector pair makes a delay fault observable at one monitor, it may also exceed the time slack in the fault free case at a different monitor which has to be masked. Therefore, a multidimensional optimizing problem has to be solved for minimizing the masking overhead and the number of test vectors while maximizing delay fault coverage.},
  doi = {http://dx.doi.org/10.1109/VTS.2017.7928921},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2017/VTS_LiuKW2017.pdf}
}
261. Functional Diagnosis for Graceful Degradation of NoC Switches
Dalirsani, A. and Wunderlich, H.-J.
Proceedings of the 25th IEEE Asian Test Symposium (ATS'16), Hiroshima, Japan, 21-24 November 2016, pp. 246-251
2016
DOI PDF 
Keywords: Functional test, functional failure mode, fault classification, functional diagnosis, pattern generation, fine-grained reconfiguration
Abstract: Reconfigurable Networks-on-Chip (NoCs) allow discarding the corrupted ports of a defective switch instead of deactivating it entirely, and thus enable fine-grained reconfiguration of the network, making the NoC structures more robust. A prerequisite for such a fine-grained reconfiguration is to identify the corrupted port of a faulty switch. This paper presents a functional diagnosis approach which extracts structural fault information from functional tests and utilizes this information to identify the broken functions/ports of a defective switch. The broken parts are discarded while the remaining functions are used for the normal operation. The non-intrusive method introduced is independent of the switch architecture and the NoC topology and can be applied for any type of structural fault. The diagnostic resolution of the functional test is so high that for nearly 64% of the faults in the example switch only a single port has to be switched off. As the remaining parts stay completely functional, the impact of faults on throughput and performance is minimized.
BibTeX:
@inproceedings{DalirW2016,
  author = {Dalirsani, Atefe and Wunderlich, Hans-Joachim},
  title = {{Functional Diagnosis for Graceful Degradation of NoC Switches}},
  booktitle = {Proceedings of the 25th IEEE Asian Test Symposium (ATS'16)},
  year = {2016},
  pages = {246--251},
  keywords = {Functional test, functional failure mode, fault classification, functional diagnosis, pattern generation, fine-grained reconfiguration},
  abstract = {Reconfigurable Networks-on-Chip (NoCs) allow discarding the corrupted ports of a defective switch instead of deactivating it entirely, and thus enable fine-grained reconfiguration of the network, making the NoC structures more robust. A prerequisite for such a fine-grained reconfiguration is to identify the corrupted port of a faulty switch. This paper presents a functional diagnosis approach which extracts structural fault information from functional tests and utilizes this information to identify the broken functions/ports of a defective switch. The broken parts are discarded while the remaining functions are used for the normal operation. The non-intrusive method introduced is independent of the switch architecture and the NoC topology and can be applied for any type of structural fault. The diagnostic resolution of the functional test is so high that for nearly 64% of the faults in the example switch only a single port has to be switched off. As the remaining parts stay completely functional, the impact of faults on throughput and performance is minimized.},
  doi = {http://dx.doi.org/10.1109/ATS.2016.18},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2016/ATS_DalirW2016.pdf}
}
260. Timing-Accurate Estimation of IR-Drop Impact on Logic- and Clock-Paths During At-Speed Scan Test
Holst, S., Schneider, E., Wen, X., Kajihara, S., Yamato, Y., Wunderlich, H.-J. and Kochte, M.A.
Proceedings of the 25th IEEE Asian Test Symposium (ATS'16), Hiroshima, Japan, 21-24 November 2016, pp. 19-24
2016
DOI PDF 
Abstract: IR-drop induced false capture failures and test clock stretch are severe problems in at-speed scan testing. We propose a new method to efficiently and accurately identify these problems. For the first time, our approach considers the additional dynamic power caused by glitches, the spatial and temporal distribution of all toggles, and their impact on both logic paths and the clock tree without time-consuming electrical simulations.
BibTeX:
@inproceedings{HolstSWKYWK2016,
  author = {Holst, Stefan and Schneider, Eric and Wen, Xiaoqing and Kajihara, Seiji and Yamato, Yuta and Wunderlich, Hans-Joachim and Kochte, Michael A.},
  title = {{Timing-Accurate Estimation of IR-Drop Impact on Logic- and Clock-Paths During At-Speed Scan Test}},
  booktitle = {Proceedings of the 25th IEEE Asian Test Symposium (ATS'16)},
  year = {2016},
  pages = {19--24},
  abstract = {IR-drop induced false capture failures and test clock stretch are severe problems in at-speed scan testing. We propose a new method to efficiently and accurately identify these problems. For the first time, our approach considers the additional dynamic power caused by glitches, the spatial and temporal distribution of all toggles, and their impact on both logic paths and the clock tree without time-consuming electrical simulations.},
  doi = {http://dx.doi.org/10.1109/ATS.2016.49},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2016/ATS_HolstSWKYWK2016.pdf}
}
259. Test Strategies for Reconfigurable Scan Networks
Kochte, M.A., Baranowski, R., Schaal, M. and Wunderlich, H.-J.
Proceedings of the 25th IEEE Asian Test Symposium (ATS'16), Hiroshima, Japan, 21-24 November 2016, pp. 113-118
2016
DOI PDF 
Keywords: Test generation, reconfigurable scan network, design-for-test, on-chip infrastructure, IEEE Std 1687, iJTAG
Abstract: On-chip infrastructure is an essential part of today’s complex designs and enables their cost-efficient manufacturing and operation. The diversity and high number of infrastructure elements demands flexible and low-latency access mechanisms, such as reconfigurable scan networks (RSNs). The correct operation of the infrastructure access itself is highly important for the test of the system logic, its diagnosis, debug and bring-up, as well as post-silicon validation. Ensuring correct operation requires the thorough testing of the RSN. Because of sequential and combinational dependencies in RSN accesses, test generation for general RSNs is computationally very difficult and requires dedicated test strategies. This paper explores different test strategies for general RSNs and discusses the achieved structural fault coverage. Experimental results show that the combination of functional test heuristics together with a dedicated RSN test pattern generation approach significantly outperforms the test quality of a standard ATPG tool.
BibTeX:
@inproceedings{KochtBSW2016,
  author = {Kochte, Michael A. and Baranowski, Rafal and Schaal, Marcel and Wunderlich, Hans-Joachim},
  title = {{Test Strategies for Reconfigurable Scan Networks}},
  booktitle = {Proceedings of the 25th IEEE Asian Test Symposium (ATS'16)},
  year = {2016},
  pages = {113--118},
  keywords = {Test generation, reconfigurable scan network, design-for-test, on-chip infrastructure, IEEE Std 1687, iJTAG},
  abstract = {On-chip infrastructure is an essential part of today’s complex designs and enables their cost-efficient manufacturing and operation. The diversity and high number of infrastructure elements demands flexible and low-latency access mechanisms, such as reconfigurable scan networks (RSNs). The correct operation of the infrastructure access itself is highly important for the test of the system logic, its diagnosis, debug and bring-up, as well as post-silicon validation. Ensuring correct operation requires the thorough testing of the RSN. Because of sequential and combinational dependencies in RSN accesses, test generation for general RSNs is computationally very difficult and requires dedicated test strategies. This paper explores different test strategies for general RSNs and discusses the achieved structural fault coverage. Experimental results show that the combination of functional test heuristics together with a dedicated RSN test pattern generation approach significantly outperforms the test quality of a standard ATPG tool.},
  doi = {http://dx.doi.org/10.1109/ATS.2016.35},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2016/ATS_KochtBSW2016.pdf}
}
258. A Neural-Network-Based Fault Classifier
Rodríguez Gómez, L. and Wunderlich, H.-J.
Proceedings of the 25th IEEE Asian Test Symposium (ATS'16), Hiroshima, Japan, 21-24 November 2016, pp. 144-149
2016
DOI PDF 
Keywords: Neural networks, machine learning, fault classification, diagnosis
Abstract: In order to reduce the number of defective parts and increase yield, especially in early stages of production, systematic defects must be identified and corrected as soon as possible. This paper presents a technique to move defect classification to the earliest phase of volume testing without any special diagnostic test patterns. A neural-network-based fault classifier is described, which is able to raise a warning, if the frequency of certain defect mechanisms increases. Only in this case more sophisticated diagnostic patterns or the even more expensive physical failure analysis have to be applied. The fault classification method presented here is able to extract underlying fault types with high confidence by identifying relevant features from the circuit topology and from logic simulation.
BibTeX:
@inproceedings{RodriW2016,
  author = {Rodríguez Gómez, Laura and Wunderlich, Hans-Joachim},
  title = {{A Neural-Network-Based Fault Classifier}},
  booktitle = {Proceedings of the 25th IEEE Asian Test Symposium (ATS'16)},
  year = {2016},
  pages = {144--149},
  keywords = {Neural networks, machine learning, fault classification, diagnosis},
  abstract = {In order to reduce the number of defective parts and increase yield, especially in early stages of production, systematic defects must be identified and corrected as soon as possible. This paper presents a technique to move defect classification to the earliest phase of volume testing without any special diagnostic test patterns. A neural-network-based fault classifier is described, which is able to raise a warning, if the frequency of certain defect mechanisms increases. Only in this case more sophisticated diagnostic patterns or the even more expensive physical failure analysis have to be applied. The fault classification method presented here is able to extract underlying fault types with high confidence by identifying relevant features from the circuit topology and from logic simulation.},
  doi = {http://dx.doi.org/10.1109/ATS.2016.46},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2016/ATS_RodriW2016.pdf}
}
257. High-Throughput Transistor-Level Fault Simulation on GPUs
Schneider, E. and Wunderlich, H.-J.
Proceedings of the 25th IEEE Asian Test Symposium (ATS'16), Hiroshima, Japan, 21-24 November 2016, pp. 150-155
2016
DOI PDF 
Keywords: fault simulation; transistor level; switch level; GPUs
Abstract: Deviations in the first-order parameters of CMOS cells can lead to severe errors in the functional and time domain. With increasing sensitivity of these parameters to manufacturing defects and variation, parametric and parasitic-aware fault simulation is becoming crucial in order to support test pattern generation. Traditional approaches based on gate-level models are not sufficient to represent and capture the impact of deviations in these parameters in either an efficient or accurate manner. Evaluation at electrical level, on the other hand, severely lacks execution speed and quickly becomes inapplicable to larger designs due to high computational demands. This work presents a novel fault simulation approach considering first-order parameters in CMOS circuits to explicitly capture CMOS-specific behavior in the functional and time domain with transistor granularity. The approach utilizes massive parallelization in order to achieve high-throughput acceleration on Graphics Processing Units (GPUs) by exploiting parallelism of cells, stimuli and faults. Despite the more precise level of abstraction, the simulator is able to process designs with millions of gates and even outperforms conventional simulation at logic level in terms of modeling accuracy and simulation speed.
BibTeX:
@inproceedings{SchneW2016,
  author = {Schneider, Eric and Wunderlich, Hans-Joachim},
  title = {{High-Throughput Transistor-Level Fault Simulation on GPUs}},
  booktitle = {Proceedings of the 25th IEEE Asian Test Symposium (ATS'16)},
  year = {2016},
  pages = {150--155},
  keywords = {fault simulation; transistor level; switch level; GPUs},
  abstract = {Deviations in the first-order parameters of CMOS cells can lead to severe errors in the functional and time domain. With increasing sensitivity of these parameters to manufacturing defects and variation, parametric and parasitic-aware fault simulation is becoming crucial in order to support test pattern generation. Traditional approaches based on gate-level models are not sufficient to represent and capture the impact of deviations in these parameters in either an efficient or accurate manner. Evaluation at electrical level, on the other hand, severely lacks execution speed and quickly becomes inapplicable to larger designs due to high computational demands. This work presents a novel fault simulation approach considering first-order parameters in CMOS circuits to explicitly capture CMOS-specific behavior in the functional and time domain with transistor granularity. The approach utilizes massive parallelization in order to achieve high-throughput acceleration on Graphics Processing Units (GPUs) by exploiting parallelism of cells, stimuli and faults. Despite the more precise level of abstraction, the simulator is able to process designs with millions of gates and even outperforms conventional simulation at logic level in terms of modeling accuracy and simulation speed.},
  doi = {http://dx.doi.org/10.1109/ATS.2016.9},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2016/ATS_SchneW2016.pdf}
}
256. Autonomous Testing for 3D-ICs with IEEE Std. 1687
Ye, J.-C., Kochte, M.A., Lee, K.-J. and Wunderlich, H.-J.
Proceedings of the 25th IEEE Asian Test Symposium (ATS'16), Hiroshima, Japan, 21-24 November 2016, pp. 215-220
2016
DOI PDF 
Keywords: IEEE Std. 1687, IJTAG, reconfigurable scan network, autonomous testing, 3D-ICs, DFT
Abstract: IEEE Std. 1687, or IJTAG, defines flexible serial scan-based architectures for accessing embedded instruments efficiently. In this paper, we present a novel test architecture that employs IEEE Std. 1687 together with an efficient test controller to carry out 3D-IC testing autonomously. The test controller can deliver parallel test data for the IEEE Std. 1687 structures and the cores under test, and provide required control signals to control the whole test procedure. This design can achieve at-speed, autonomous and programmable testing in 3D-ICs. Experimental results show that the additional area and test cycle overhead of this architecture is small considering its autonomous test capability.
BibTeX:
@inproceedings{YeKLW2016,
  author = {Ye, Jin-Cun and Kochte, Michael A. and Lee, Kuen-Jong and Wunderlich, Hans-Joachim},
  title = {{Autonomous Testing for 3D-ICs with IEEE Std. 1687}},
  booktitle = {Proceedings of the 25th IEEE Asian Test Symposium (ATS'16)},
  year = {2016},
  pages = {215--220},
  keywords = {IEEE Std. 1687, IJTAG, reconfigurable scan network, autonomous testing, 3D-ICs, DFT},
  abstract = {IEEE Std. 1687, or IJTAG, defines flexible serial scan-based architectures for accessing embedded instruments efficiently. In this paper, we present a novel test architecture that employs IEEE Std. 1687 together with an efficient test controller to carry out 3D-IC testing autonomously. The test controller can deliver parallel test data for the IEEE Std. 1687 structures and the cores under test, and provide required control signals to control the whole test procedure. This design can achieve at-speed, autonomous and programmable testing in 3D-ICs. Experimental results show that the additional area and test cycle overhead of this architecture is small considering its autonomous test capability.},
  doi = {http://dx.doi.org/10.1109/ATS.2016.56},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2016/ATS_YeKLW2016.pdf}
}
255. Applying Efficient Fault Tolerance to Enable the Preconditioned Conjugate Gradient Solver on Approximate Computing Hardware
Schöll, A., Braun, C. and Wunderlich, H.-J.
Proceedings of the IEEE International Symposium on Defect and Fault Tolerance in VLSI and Nanotechnology Systems (DFT'16), University of Connecticut, USA, 19-20 September 2016, pp. 21-26
DFT 2016 Best Paper Award
2016
DOI PDF 
Keywords: Approximate Computing, Fault Tolerance, Sparse Linear System Solving, Preconditioned Conjugate Gradient
Abstract: A new technique is presented that allows to execute the preconditioned conjugate gradient (PCG) solver on approximate hardware while ensuring correct solver results. This technique expands the scope of approximate computing to scientific and engineering applications. The changing error resilience of PCG during the solving process is exploited by different levels of approximation which trade off numerical accuracy and hardware utilization. Such approximation levels are determined at runtime by periodically estimating the error resilience. An efficient fault tolerance technique allows reductions in hardware utilization by ensuring the continued exploitation of maximum allowed energy-accuracy trade-offs. Experimental results show that the hardware utilization is reduced on average by 14.5% and by up to 41.0% compared to executing PCG on accurate hardware.
BibTeX:
@inproceedings{SchoeBW2016,
  author = {Schöll, Alexander and Braun, Claus and Wunderlich, Hans-Joachim},
  title = {{Applying Efficient Fault Tolerance to Enable the Preconditioned Conjugate Gradient Solver on Approximate Computing Hardware}},
  booktitle = {Proceedings of the IEEE International Symposium on Defect and Fault Tolerance in VLSI and Nanotechnology Systems (DFT'16)},
  year = {2016},
  pages = {21--26},
  keywords = {Approximate Computing, Fault Tolerance, Sparse Linear System Solving, Preconditioned Conjugate Gradient},
  abstract = {A new technique is presented that allows to execute the preconditioned conjugate gradient (PCG) solver on approximate hardware while ensuring correct solver results. This technique expands the scope of approximate computing to scientific and engineering applications. The changing error resilience of PCG during the solving process is exploited by different levels of approximation which trade off numerical accuracy and hardware utilization. Such approximation levels are determined at runtime by periodically estimating the error resilience. An efficient fault tolerance technique allows reductions in hardware utilization by ensuring the continued exploitation of maximum allowed energy-accuracy trade-offs. Experimental results show that the hardware utilization is reduced on average by 14.5% and by up to 41.0% compared to executing PCG on accurate hardware.},
  doi = {http://dx.doi.org/10.1109/DFT.2016.7684063},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2016/DFT_SchoeBW2016.pdf}
}
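As a rough illustration of the runtime quality monitoring discussed in entry 255 above, the sketch below shows a Jacobi-preconditioned conjugate gradient loop that periodically recomputes the true residual as a correctness check. The paper's adaptive precision control and resilience estimation for approximate hardware are not reproduced here; the solver variant, preconditioner, check interval, and test problem are assumptions chosen only to make the sketch self-contained.

# Sketch: PCG with a periodic true-residual check as a simple quality monitor.
import numpy as np

def monitored_pcg(A, b, tol=1e-8, max_iter=500, check_every=10):
    x = np.zeros_like(b)
    m_inv = 1.0 / np.diag(A)              # Jacobi preconditioner
    r = b - A @ x
    z = m_inv * r
    p = z.copy()
    rz = r @ z
    for k in range(max_iter):
        Ap = A @ p
        alpha = rz / (p @ Ap)
        x = x + alpha * p
        r = r - alpha * Ap
        if k % check_every == 0:           # monitor: recompute the exact residual
            if np.linalg.norm(b - A @ x) <= tol * np.linalg.norm(b):
                return x, k
        z = m_inv * r
        rz_new = r @ z
        p = z + (rz_new / rz) * p
        rz = rz_new
    return x, max_iter

rng = np.random.default_rng(1)
Q = rng.standard_normal((50, 50))
A = Q @ Q.T + 50.0 * np.eye(50)            # symmetric positive definite test matrix
b = rng.standard_normal(50)
x, iterations = monitored_pcg(A, b)

Recomputing the exact residual only every few iterations keeps the monitoring overhead small while still catching drift in the recurrence; the paper goes further by also steering the hardware precision based on such checks.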
254. Pushing the Limits: How Fault Tolerance Extends the Scope of Approximate Computing
Wunderlich, H.-J., Braun, C. and Schöll, A.
Proceedings of the 22nd IEEE International Symposium on On-Line Testing and Robust System Design (IOLTS'16), Sant Feliu de Guixols, Catalunya, Spain, 4-6 July 2016, pp. 133-136
2016
DOI PDF 
Keywords: Approximate Computing, Variable Precision, Metrics, Characterization, Fault Tolerance
Abstract: Approximate computing in hardware and software promises significantly improved computational performance combined with very low power and energy consumption. This goal is achieved by both relaxing strict requirements on accuracy and precision, and by allowing a deviating behavior from exact Boolean specifications to a certain extent. Today, approximate computing is often limited to applications with a certain degree of inherent error tolerance, where perfect computational results are not always required. However, in order to fully utilize its benefits, the scope of applications has to be significantly extended to other compute-intensive domains including science and engineering. To meet the often rather strict quality and reliability requirements for computational results in these domains, the use of appropriate characterization and fault tolerance measures is highly required. In this paper, we evaluate some of the available techniques and how they may extend the scope of application for approximate computing.
BibTeX:
@inproceedings{WundeBS2016,
  author = {Wunderlich, Hans-Joachim and Braun, Claus and Schöll, Alexander},
  title = {{Pushing the Limits: How Fault Tolerance Extends the Scope of Approximate Computing}},
  booktitle = {Proceedings of the 22nd IEEE International Symposium on On-Line Testing and Robust System Design (IOLTS'16)},
  year = {2016},
  pages = {133--136},
  keywords = {Approximate Computing, Variable Precision, Metrics, Characterization, Fault Tolerance},
  abstract = {Approximate computing in hardware and software promises significantly improved computational performance combined with very low power and energy consumption. This goal is achieved by both relaxing strict requirements on accuracy and precision, and by allowing a deviating behavior from exact Boolean specifications to a certain extent. Today, approximate computing is often limited to applications with a certain degree of inherent error tolerance, where perfect computational results are not always required. However, in order to fully utilize its benefits, the scope of applications has to be significantly extended to other compute-intensive domains including science and engineering. To meet the often rather strict quality and reliability requirements for computational results in these domains, the use of appropriate characterization and fault tolerance measures is highly required. In this paper, we evaluate some of the available techniques and how they may extend the scope of application for approximate computing.},
  doi = {http://dx.doi.org/10.1109/IOLTS.2016.7604686},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2016/IOLTS_WundeBS2016.pdf}
}
253. Efficient Algorithm-Based Fault Tolerance for Sparse Matrix Operations
Schöll, A., Braun, C., Kochte, M.A. and Wunderlich, H.-J.
Proceedings of the 46th Annual IEEE/IFIP International Conference on Dependable Systems and Networks (DSN'16), Toulouse, France, 28 June-1 July 2016, pp. 251-262
2016
DOI PDF 
Keywords: Fault Tolerance, Sparse Linear Algebra, ABFT, Online Error Localization
Abstract: We propose a fault tolerance approach for sparse matrix operations that detects and implicitly locates errors in the results for efficient local correction. This approach reduces the runtime overhead for fault tolerance and provides high error coverage. Existing algorithm-based fault tolerance approaches for sparse matrix operations detect and correct errors, but they often rely on expensive error localization steps. General checkpointing schemes can induce large recovery cost for high error rates. For sparse matrix-vector multiplications, experimental results show an average reduction in runtime overhead of 43.8%, while the error coverage is on average improved by 52.2% compared to related work. The practical applicability is demonstrated in a case study using the iterative Preconditioned Conjugate Gradient solver. When scaling the error rate by four orders of magnitude, the average runtime overhead increases only by 31.3% compared to low error rates.
BibTeX:
@inproceedings{SchoeBKW2016,
  author = {Schöll, Alexander and Braun, Claus and Kochte, Michael A. and Wunderlich, Hans-Joachim},
  title = {{Efficient Algorithm-Based Fault Tolerance for Sparse Matrix Operations}},
  booktitle = {Proceedings of the 46th Annual IEEE/IFIP International Conference on Dependable Systems and Networks (DSN'16)},
  year = {2016},
  pages = {251--262},
  keywords = {Fault Tolerance, Sparse Linear Algebra, ABFT, Online Error Localization},
  abstract = {We propose a fault tolerance approach for sparse matrix operations that detects and implicitly locates errors in the results for efficient local correction. This approach reduces the runtime overhead for fault tolerance and provides high error coverage. Existing algorithm-based fault tolerance approaches for sparse matrix operations detect and correct errors, but they often rely on expensive error localization steps. General checkpointing schemes can induce large recovery cost for high error rates. For sparse matrix-vector multiplications, experimental results show an average reduction in runtime overhead of 43.8%, while the error coverage is on average improved by 52.2% compared to related work. The practical applicability is demonstrated in a case study using the iterative Preconditioned Conjugate Gradient solver. When scaling the error rate by four orders of magnitude, the average runtime overhead increases only by 31.3% compared to low error rates.},
  doi = {http://dx.doi.org/10.1109/DSN.2016.31},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2016/DSN_SchoeBKW2016.pdf}
}
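A note on the technique of entry 253: the classical checksum idea that algorithm-based fault tolerance (ABFT) for matrix operations builds on can be sketched in a few lines of Python. This is only an illustration of the generic column-checksum test for a sparse matrix-vector product, not the error localization and local correction scheme proposed in the paper; the library calls, tolerance, and test matrix below are assumptions made for the sketch.

# Sketch of the classical ABFT column-checksum test for y = A @ x.
# Not the paper's method; it only shows the generic detection principle.
import numpy as np
import scipy.sparse as sp

def checked_spmv(A, x, rtol=1e-10):
    """Return y = A @ x and a flag telling whether the checksum test passed."""
    y = A @ x
    col_checksum = np.asarray(A.sum(axis=0)).ravel()  # c = 1^T A, can be precomputed once
    expected = col_checksum @ x                        # c x should equal 1^T (A x)
    ok = bool(np.isclose(y.sum(), expected, rtol=rtol))
    return y, ok

A = sp.random(1000, 1000, density=0.01, format="csr", random_state=0)  # hypothetical test matrix
x = np.random.default_rng(0).standard_normal(1000)
y, ok = checked_spmv(A, x)

The check adds one extra inner product per multiplication, which is why checksum-based detection stays far cheaper than duplicating the computation.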
252. Formal Verification of Secure Reconfigurable Scan Network Infrastructure
Kochte, M.A., Baranowski, R., Sauer, M., Becker, B. and Wunderlich, H.-J.
Proceedings of the 21st IEEE European Test Symposium (ETS'16), Amsterdam, The Netherlands, 23-27 May 2016, pp. 1-6
2016
DOI PDF 
Keywords: Security, Formal verification, IEEE Std 1687, IJTAG, Reconfigurable scan network, Infrastructure, Sidechannel attack
Abstract: Reconfigurable scan networks (RSN) as standardized by IEEE Std 1687 allow flexible and efficient access to on-chip infrastructure for test and diagnosis, post-silicon validation, debug, bring-up, or maintenance in the field. However, unauthorized access or manipulation of the attached instruments, monitors, or controllers pose security and safety risks. Different RSN architectures have recently been proposed to implement secure access to the connected instruments, for instance by authentication and authorization. To ensure that the implemented security schemes cannot be bypassed, design verification of the security properties is mandatory. However, combinational and deep sequential dependencies of modern RSNs and their extensions for security require novel approaches to formal verification for unbounded model checking. This work presents for the first time a formal design verification methodology for security properties of RSNs based on unbounded model checking that is able to verify access protection at logical level. Experimental results demonstrate that state-of-the-art security schemes for RSNs can be efficiently handled, even for very large designs.
BibTeX:
@inproceedings{KochtBSBW2016,
  author = {Kochte, Michael A. and Baranowski, Rafal and Sauer, Matthias and Becker, Bernd and Wunderlich, Hans-Joachim },
  title = {{Formal Verification of Secure Reconfigurable Scan Network Infrastructure}},
  booktitle = {Proceedings of the 21st IEEE European Test Symposium (ETS'16)},
  year = { 2016 },
  pages = {1-6},
  keywords = {Security, Formal verification, IEEE Std 1687, IJTAG, Reconfigurable scan network, Infrastructure, Side-channel attack},
  abstract = {Reconfigurable scan networks (RSN) as standardized by IEEE Std 1687 allow flexible and efficient access to on-chip infrastructure for test and diagnosis, post-silicon validation, debug, bring-up, or maintenance in the field. However, unauthorized access to or manipulation of the attached instruments, monitors, or controllers poses security and safety risks. Different RSN architectures have recently been proposed to implement secure access to the connected instruments, for instance by authentication and authorization. To ensure that the implemented security schemes cannot be bypassed, design verification of the security properties is mandatory. However, combinational and deep sequential dependencies of modern RSNs and their extensions for security require novel approaches to formal verification for unbounded model checking. This work presents for the first time a formal design verification methodology for security properties of RSNs based on unbounded model checking that is able to verify access protection at the logical level. Experimental results demonstrate that state-of-the-art security schemes for RSNs can be efficiently handled, even for very large designs.},
  doi = {http://dx.doi.org/10.1109/ETS.2016.7519290},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2016/ETS_KochtBSBW2016.pdf}
}
251. SHIVA: Sichere Hardware in der Informationsverarbeitung
Kochte, M.A., Sauer, M., Raiola, P., Becker, B. and Wunderlich, H.-J.
Proceedings of the ITG/GI/GMM edaWorkshop 2016, Hannover, Germany, 11-12 May 2016
2016
URL PDF 
Abstract: The project "SHIVA: Sichere Hardware in der Informationsverarbeitung" (Secure Hardware in Information Processing) is part of the research programme "IKT-Sicherheit für weltweit vernetzte vertrauenswürdige Infrastrukturen" (ICT security for globally networked trustworthy infrastructures) of the Baden-Württemberg Stiftung. The project investigates design and verification methods that increase the security of microelectronic hardware, for example in automotive electronics, medical devices, or manufacturing technology. Its aim is to rule out the misuse of non-functional hardware infrastructure for observing internal sensitive data, employed methods and processes, as well as for attacks on the intellectual property embodied in the hardware. The project is a cooperation between the Institut für Technische Informatik (ITI) of the University of Stuttgart and the Chair of Computer Architecture of the University of Freiburg. This contribution presents the project goals and first research results.
BibTeX:
@inproceedings{KochtSRBW2016,
  author = {Kochte, Michael A. and Sauer, Matthias and Raiola, Pascal and Becker, Bernd and Wunderlich, Hans-Joachim},
  title = {{SHIVA: Sichere Hardware in der Informationsverarbeitung}},
  booktitle = {Proceedings of the ITG/GI/GMM edaWorkshop 2016},
  year = {2016},
  abstract = {Das Projekt ”SHIVA: Sichere Hardware in der Informationsverarbeitung“ ist Teil des Forschungsprogramms ”IKTSicherheit für weltweit vernetzte vertrauenswürdige Infrastrukturen“ der Baden-Württemberg Stiftung. Ziel des Projekts sind die Erforschung von Entwurfs- und Verifikationsmethoden zur Steigerung der Sicherheit mikroelektronischer Hardware, beispielsweise aus der Automobilelektronik, der Medizintechnik oder auch der Fertigungstechnik. Es soll damit die missbräuchliche Verwendung nicht-funktionaler Hardware-Infrastruktur zur Beobachtung interner sensibler Daten, verwendeter Verfahren und Prozesse sowie zu Angriffen auf das geistige Eigentum an der Hardware ausgeschlossen werden. Das Projekt ist eine Kooperation des Instituts für Technische Informatik (ITI) der Universität Stuttgart und des Lehrstuhls für Rechnerarchitektur der Universität Freiburg. Dieser Beitrag stellt die Projektziele und erste Forschungsergebnisse vor.},
  url = {http://www.book-on-demand.de/shop/14818},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2016/EDA_KochtSRBW2016.pdf}
}
250. Fault Tolerance of Approximate Compute Algorithms
Wunderlich, H.-J., Braun, C. and Schöll, A.
Proceedings of the 34th VLSI Test Symposium (VTS'16), Caesars Palace, Las Vegas, Nevada, USA, 25-27 April 2016
2016
DOI PDF 
Abstract: Approximate computing algorithms cover a wide range of different applications, and the boundaries to domains like variable-precision computing, where the precision of the computations can be adapted online to the needs of the application [1, 2], as well as probabilistic and stochastic computing [3], which incorporate stochastic processes and probability distributions in the target computations, are sometimes blurred. The central idea of purely algorithm-based approximate computing is to transform algorithms, without necessarily requiring approximate hardware, to trade off accuracy against energy. Early termination of algorithms that exhibit incremental refinement [4] reduces iterations at the cost of accuracy. Loop perforation [5] approximates iteratively-computed results by identifying and reducing loops that contribute only insignificantly to the solution. Another group of approximate algorithms is represented by neural networks, which can be trained to mimic certain algorithms and to compute approximate results [6]. Today, approximate computing is predominantly proposed for applications in multimedia and signal processing with a certain degree of inherent error tolerance. However, in order to fully utilize the benefits of these architectures, the scope of applications has to be significantly extended to other compute-intensive tasks, for instance, in science and engineering. Such an extension requires that the allowed error or the required minimum precision of the application is either known beforehand or reliably determined online to deliver trustworthy and useful results. Errors outside the allowed range have to be reliably detected and tackled by appropriate fault tolerance measures.
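As a toy illustration of the loop perforation idea mentioned in the abstract (a purely hypothetical example, not taken from the paper): skipping iterations of a reduction loop and rescaling trades accuracy for runtime.

def mean_perforated(values, skip=2):
    # Visit only every skip-th element; the omitted iterations are the
    # "perforated" ones, so the result only approximates the exact mean.
    acc, n = 0.0, 0
    for i in range(0, len(values), skip):
        acc += values[i]
        n += 1
    return acc / n

data = [0.1 * i for i in range(1000)]
print(mean_perforated(data, skip=4))  # approx. 49.8, whereas the exact mean is 49.95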
BibTeX:
@inproceedings{WundeBS2016a,
  author = {Wunderlich, Hans-Joachim and Braun, Claus and Schöll, Alexander},
  title = {{Fault Tolerance of Approximate Compute Algorithms}},
  booktitle = {Proceedings of the 34th VLSI Test Symposium (VTS'16)},
  year = {2016},
  abstract = {Approximate computing algorithms cover a wide range of different applications, and the boundaries to domains like variable-precision computing, where the precision of the computations can be adapted online to the needs of the application [1, 2], as well as probabilistic and stochastic computing [3], which incorporate stochastic processes and probability distributions in the target computations, are sometimes blurred. The central idea of purely algorithm-based approximate computing is to transform algorithms, without necessarily requiring approximate hardware, to trade off accuracy against energy. Early termination of algorithms that exhibit incremental refinement [4] reduces iterations at the cost of accuracy. Loop perforation [5] approximates iteratively-computed results by identifying and reducing loops that contribute only insignificantly to the solution. Another group of approximate algorithms is represented by neural networks, which can be trained to mimic certain algorithms and to compute approximate results [6]. Today, approximate computing is predominantly proposed for applications in multimedia and signal processing with a certain degree of inherent error tolerance. However, in order to fully utilize the benefits of these architectures, the scope of applications has to be significantly extended to other compute-intensive tasks, for instance, in science and engineering. Such an extension requires that the allowed error or the required minimum precision of the application is either known beforehand or reliably determined online to deliver trustworthy and useful results. Errors outside the allowed range have to be reliably detected and tackled by appropriate fault tolerance measures.},
  doi = {http://dx.doi.org/10.1109/VTS.2016.7477307},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2016/VTS_WundeBS2016.pdf}
}
249. Dependable On-Chip Infrastructure for Dependable MPSOCs
Kochte, M.A. and Wunderlich, H.-J.
Proceedings of the 17th IEEE Latin American Test Symposium (LATS'16), Foz do Iguaçu, Brazil, 6-8 April 2016 , pp. 183-188
2016
DOI PDF 
Keywords: Dependability, on-chip infrastructure, reconfigurable scan network, IEEE Std 1687, iJTAG, hardware security
Abstract: Today's MPSOCs employ complex on-chip infrastructure and instrumentation for efficient test, debug, diagnosis, and post-silicon validation, reliability management and maintenance in the field, or monitoring and calibration during operation. To enable flexible and efficient access to such instrumentation, reconfigurable scan networks (RSNs) as recently standardized by IEEE Std 1687 can be used. Given the importance of infrastructure for the dependability of the whole MPSOC, however, the RSN itself must be highly dependable. This paper addresses dependability issues of RSNs including verification, test, and security, and their importance for dependable MPSOCs. First research results are summarized, and open questions for future work are highlighted.
BibTeX:
@inproceedings{KochtW2016,
  author = {Kochte, Michael A. and Wunderlich, Hans-Joachim},
  title = {{Dependable On-Chip Infrastructure for Dependable MPSOCs}},
  booktitle = {Proceedings of the 17th IEEE Latin American Test Symposium (LATS'16)},
  year = { 2016 },
  pages = {183-188},
  keywords = { Dependability, on-chip infrastructure, reconfigurable scan network, IEEE Std 1687, iJTAG, hardware security },
  abstract = {Today's MPSOCs employ complex on-chip infrastructure and instrumentation for efficient test, debug, diagnosis, and post-silicon validation, reliability management and maintenance in the field, or monitoring and calibration during operation. To enable flexible and efficient access to such instrumentation, reconfigurable scan networks (RSNs) as recently standardized by IEEE Std 1687 can be used. Given the importance of infrastructure for the dependability of the whole MPSOC, however, the RSN itself must be highly dependable. This paper addresses dependability issues of RSNs including verification, test, and security, and their importance for dependable MPSOCs. First research results are summarized, and open questions for future work are highlighted.},
  doi = {http://dx.doi.org/10.1109/LATW.2016.7483366},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2016/LATS_KochtW2016.pdf}
}
248. Mixed 01X-RSL-Encoding for Fast and Accurate ATPG with Unknowns
Erb, D., Scheibler, K., Kochte, M.A., Sauer, M., Wunderlich, H.-J. and Becker, B.
Proceedings of the 21st Asia and South Pacific Design Automation Conference (ASP-DAC'16), Macao SAR, China, 25-28 January 2016 , pp. 749-754
2016
DOI PDF 
Keywords: Unknown values, test generation, Restricted symbolic logic, SAT, Stuck-at fault
Abstract: Unknown (X) values in a design introduce pessimism in conventional test generation algorithms which results in a loss of fault coverage. This pessimism is reduced by a more accurate modeling and analysis. Unfortunately, accurate analysis techniques highly increase runtime and limit scalability. One promising technique to prevent high runtimes while still providing high accuracy is the use of restricted symbolic logic (RSL). However, even pure RSL-based algorithms reach their limits as soon as million-gate circuits need to be processed. In this paper, we propose new ATPG techniques to overcome such limitations. An efficient hybrid encoding combines the accuracy of RSL-based modeling with the compactness of conventional three-valued encoding. A low-cost two-valued SAT-based untestability check is able to classify most untestable faults with low runtime. An incremental and event-based accurate fault simulator is introduced to reduce fault simulation effort. The experiments demonstrate the effectiveness of the proposed techniques. Over 97% of the faults are accurately classified. Both the number of aborts and the total runtime are significantly reduced compared to the state-of-the-art pure RSL-based algorithm. For circuits up to a million gates, the fault coverage could be increased considerably compared to a state-of-the-art commercial tool with very competitive runtimes.
BibTeX:
@inproceedings{ErbSKSWB2016,
  author = {Erb, Dominik and Scheibler, Karsten and Kochte, Michael A. and Sauer, Matthias and Wunderlich, Hans-Joachim and Becker, Bernd},
  title = {{Mixed 01X-RSL-Encoding for Fast and Accurate ATPG with Unknowns}},
  booktitle = {Proceedings of the 21st Asia and South Pacific Design Automation Conference (ASP-DAC'16)},
  year = { 2016 },
  pages = {749-754},
  keywords = {Unknown values, test generation, Restricted symbolic logic, SAT, Stuck-at fault},
  abstract = {Unknown (X) values in a design introduce pessimism in conventional test generation algorithms which results in a loss of fault coverage. This pessimism is reduced by a more accurate modeling and analysis. Unfortunately, accurate analysis techniques highly increase runtime and limit scalability. One promising technique to prevent high runtimes while still providing high accuracy is the use of restricted symbolic logic (RSL). However, even pure RSL-based algorithms reach their limits as soon as million-gate circuits need to be processed. In this paper, we propose new ATPG techniques to overcome such limitations. An efficient hybrid encoding combines the accuracy of RSL-based modeling with the compactness of conventional three-valued encoding. A low-cost two-valued SAT-based untestability check is able to classify most untestable faults with low runtime. An incremental and event-based accurate fault simulator is introduced to reduce fault simulation effort. The experiments demonstrate the effectiveness of the proposed techniques. Over 97% of the faults are accurately classified. Both the number of aborts and the total runtime are significantly reduced compared to the state-of-the-art pure RSL-based algorithm. For circuits up to a million gates, the fault coverage could be increased considerably compared to a state-of-the-art commercial tool with very competitive runtimes.},
  doi = {http://dx.doi.org/10.1109/ASPDAC.2016.7428101},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2016/ASPDAC_ErbSKSWB2016.pdf}
}
247. Accurate QBF-based Test Pattern Generation in Presence of Unknown Values
Erb, D., Kochte, M.A., Reimer, S., Sauer, M., Wunderlich, H.-J. and Becker, B.
IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems (TCAD)
Vol. 34(12), December 2015, pp. 2025-2038
2015
DOI PDF 
Keywords: Unknown values, X-values, ATPG, QBF, SAT, stuck-at fault, transition-delay fault
Abstract: Unknown (X) values emerge during the design process as well as during system operation and test application. X-sources are for instance black boxes in design models, clock-domain boundaries, analog-to-digital converters, or uncontrolled or uninitialized sequential elements. To compute a test pattern for a given fault, well-defined logic values are required both for fault activation and propagation to observing outputs. In presence of X-values, conventional test generation algorithms, based on structural algorithms, Boolean satisfiability (SAT), or BDD-based reasoning may fail to generate test patterns or to prove faults untestable. This work proposes the first efficient stuck-at and transition-delay fault test generation algorithm able to prove testability or untestability of faults in presence of X-values. It overcomes the principal pessimism of conventional algorithms when X-values are considered by mapping the test generation problem to the satisfiability of Quantified Boolean Formulae (QBF). Experiments on ISCAS benchmarks and larger industrial circuits investigate the increase in fault coverage for conventional deterministic and potential detection requirements for both randomized and clustered X-sources.
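To illustrate the pessimism the abstract refers to, here is a minimal, hypothetical example (not the paper's QBF formulation): three-valued simulation loses the correlation between reconvergent branches of an X-source, e.g. for y = x XOR x it reports X although y is 0 for every value of x.

X = 'X'

def xor3(a, b):
    # Conventional three-valued XOR: any X operand forces the result to X.
    return X if X in (a, b) else a ^ b

def y_threevalued(x):
    # y = x XOR x under three-valued logic: the two fan-out branches of the
    # X-source are treated as independent, so the result is pessimistically X.
    return xor3(x, x)

def y_accurate(x):
    # Accurate evaluation enumerates the single X-source once: y is always 0.
    if x is X:
        values = {v ^ v for v in (0, 1)}
        return values.pop() if len(values) == 1 else X
    return x ^ x

print(y_threevalued(X), y_accurate(X))  # prints: X 0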
BibTeX:
@article{ErbKRSWB2015,
  author = {Erb, Dominik and Kochte, Michael A. and Reimer, Sven and Sauer, Matthias and Wunderlich, Hans-Joachim and Becker, Bernd},
  title = {{Accurate QBF-based Test Pattern Generation in Presence of Unknown Values}},
  journal = {IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems (TCAD)},
  year = {2015},
  volume = {34},
  number = {12},
  pages = {2025--2038},
  keywords = {Unknown values, X-values, ATPG, QBF, SAT, stuck-at fault, transition-delay fault},
  abstract = {Unknown (X) values emerge during the design process as well as during system operation and test application. X-sources are for instance black boxes in design models, clock-domain boundaries, analog-to-digital converters, or uncontrolled or uninitialized sequential elements. To compute a test pattern for a given fault, well-defined logic values are required both for fault activation and propagation to observing outputs. In presence of X-values, conventional test generation algorithms, based on structural algorithms, Boolean satisfiability (SAT), or BDD-based reasoning may fail to generate test patterns or to prove faults untestable. This work proposes the first efficient stuck-at and transition-delay fault test generation algorithm able to prove testability or untestability of faults in presence of X-values. It overcomes the principal pessimism of conventional algorithms when X-values are considered by mapping the test generation problem to the satisfiability of Quantified Boolean Formulae (QBF). Experiments on ISCAS benchmarks and larger industrial circuits investigate the increase in fault coverage for conventional deterministic and potential detection requirements for both randomized and clustered X-sources.},
  doi = {http://dx.doi.org/10.1109/TCAD.2015.2440315},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2015/TCAD_ErbKRSWB2015.pdf}
}
246. Logic/Clock-Path-Aware At-Speed Scan Test Generation for Avoiding False Capture Failures and Reducing Clock Stretch
Asada, K., Wen, X., Holst, S., Miyase, K., Kajihara, S., Kochte, M.A., Schneider, E., Wunderlich, H.-J. and Qian, J.
Proceedings of the 24th IEEE Asian Test Symposium (ATS'15), Mumbai, India, 22-25 November 2015, pp. 103-108
ATS 2015 Best Paper Award
2015
DOI PDF 
Keywords: launch switching activity, IR-drop, logic path, clock path, false capture failure, test clock stretch, X-filling
Abstract: IR-drop induced by launch switching activity (LSA) in capture mode during at-speed scan testing increases delay along not only logic paths (LPs) but also clock paths (CPs). Excessive extra delay along LPs compromises test yields due to false capture failures, while excessive extra delay along CPs compromises test quality due to test clock stretch. This paper is the first to mitigate the impact of LSA on both LPs and CPs with a novel LCPA (Logic/Clock-Path-Aware) at-speed scan test generation scheme, featuring (1) a new metric for assessing the risk of false capture failures based on the amount of LSA around both LPs and CPs, (2) a procedure for avoiding false capture failures by reducing LSA around LPs or masking uncertain test responses, and (3) a procedure for reducing test clock stretch by reducing LSA around CPs. Experimental results demonstrate the effectiveness of the LCPA scheme in improving test yields and test quality.
BibTeX:
@inproceedings{AsadaWHMKKSWQ2015,
  author = {Asada, Koji and Wen, Xiaoqing and Holst, Stefan and Miyase, Kohei and Kajihara, Seiji and Kochte, Michael A. and Schneider, Eric and Wunderlich, Hans-Joachim and Qian, Jun},
  title = {{Logic/Clock-Path-Aware At-Speed Scan Test Generation for Avoiding False Capture Failures and Reducing Clock Stretch}},
  booktitle = {Proceedings of the 24th IEEE Asian Test Symposium (ATS'15)},
  year = {2015},
  pages = {103-108},
  keywords = { launch switching activity, IR-drop, logic path, clock path, false capture failure, test clock stretch, X-filling },
  abstract = {IR-drop induced by launch switching activity (LSA) in capture mode during at-speed scan testing increases delay along not only logic paths (LPs) but also clock paths (CPs). Excessive extra delay along LPs compromises test yields due to false capture failures, while excessive extra delay along CPs compromises test quality due to test clock stretch. This paper is the first to mitigate the impact of LSA on both LPs and CPs with a novel LCPA (Logic/Clock-Path-Aware) at-speed scan test generation scheme, featuring (1) a new metric for assessing the risk of false capture failures based on the amount of LSA around both LPs and CPs, (2) a procedure for avoiding false capture failures by reducing LSA around LPs or masking uncertain test responses, and (3) a procedure for reducing test clock stretch by reducing LSA around CPs. Experimental results demonstrate the effectiveness of the LCPA scheme in improving test yields and test quality.},
  doi = {http://dx.doi.org/10.1109/ATS.2015.25},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2015/ATS_AsadaWHMKKSWQ2015.pdf}
}
245. Optimized Selection of Frequencies for Faster-Than-at-Speed Test
Kampmann, M., Kochte, M.A., Schneider, E., Indlekofer, T., Hellebrand, S. and Wunderlich, H.-J.
Proceedings of the 24th IEEE Asian Test Symposium (ATS'15), Mumbai, India, 22-25 November 2015, pp. 109-114
2015
DOI PDF 
Keywords: BIST, small delay defects, delay test, faster-than-at-speed-test
Abstract: Small gate delay faults (SDFs) are not detectable at speed if they can only be propagated along short paths. These hidden delay faults (HDFs) do not influence the circuit’s behavior initially, but they may indicate design marginalities leading to early-life failures, and therefore they cannot be neglected. HDFs can be detected by faster-than-at-speed test (FAST), where typically several different frequencies are used to maximize the coverage. A given set of test patterns P potentially detects an HDF if it contains a test pattern sensitizing a path through the fault site, and the efficiency of FAST can be measured as the ratio of actually detected HDFs to potentially detected HDFs. The paper at hand targets maximum test efficiency with a minimum number of frequencies. The procedure starts with a test set for transition delay faults and a set of preselected equidistant frequencies. Timing-accurate simulation of this initial setup identifies the hard-to-detect faults, which are then targeted by a more complex timing-aware ATPG procedure. For the yet undetected HDFs, a minimum number of frequencies are determined using an efficient hypergraph algorithm. Experimental results show that with this approach, the number of test frequencies required for maximum test efficiency can be reduced considerably. Furthermore, test set inflation is limited as timing-aware ATPG is only used for a small subset of HDFs.
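For intuition, a minimal greedy sketch of the frequency-selection step follows; the paper uses a hypergraph-based algorithm, so the greedy set cover below and all names are merely illustrative assumptions. Each hidden delay fault is potentially detected only within a range of test frequencies, and the goal is to pick as few frequencies as possible that together cover all faults.

def select_frequencies(candidates, detectable):
    # candidates: list of available test frequencies
    # detectable: dict mapping each fault to the set of frequencies that detect it
    uncovered = set(detectable)
    chosen = []
    while uncovered:
        # Greedily pick the frequency that detects the most still-uncovered faults.
        best = max(candidates,
                   key=lambda f: sum(1 for flt in uncovered if f in detectable[flt]))
        newly = {flt for flt in uncovered if best in detectable[flt]}
        if not newly:
            break  # remaining faults are not detectable at any candidate frequency
        chosen.append(best)
        uncovered -= newly
    return chosen

freqs = [400, 500, 600, 700]
faults = {'f1': {600, 700}, 'f2': {500, 600}, 'f3': {700}}
print(select_frequencies(freqs, faults))  # [600, 700]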
BibTeX:
@inproceedings{KampmKSIHW2015,
  author = {Kampmann, Matthias and Kochte, Michael A. and Schneider, Eric and Indlekofer, Thomas and Hellebrand, Sybille and Wunderlich, Hans-Joachim},
  title = {{Optimized Selection of Frequencies for Faster-Than-at-Speed Test}},
  booktitle = {Proceedings of the 24th IEEE Asian Test Symposium (ATS'15)},
  year = {2015},
  pages = {109-114},
  keywords = {BIST, small delay defects, delay test, faster-than-at-speed-test},
  abstract = {Small gate delay faults (SDFs) are not detectable at-speed, if they can only be propagated along short paths. These hidden delay faults (HDFs) do not influence the circuit’s behavior initially, but they may indicate design marginalities leading to early-life failures, and therefore they cannot be neglected. HDFs can be detected by faster-than-at-speed test (FAST), where typically several different frequencies are used to maximize the coverage. A given set of test patterns P potentially detects a HDF if it contains a test pattern sensitizing a path through the fault site, and the efficiency of FAST can be measured as the ratio of actually detected HDFs to potentially detected HDFs. The paper at hand targets maximum test efficiency with a minimum number of frequencies. The procedure starts with a test set for transition delay faults and a set of preselected equidistant frequencies. Timing-accurate simulation of this initial setup identifies the hard-to-detect faults, which are then targeted by a more complex timing-aware ATPG procedure. For the yet undetected HDFs, a minimum number of frequencies are determined using an efficient hypergraph algorithm. Experimental results show that with this approach, the number of test frequencies required for maximum test efficiency can be reduced considerably. Furthermore, test set inflation is limited as timing-aware ATPG is only used for a small subset of HDFs.},
  doi = {http://dx.doi.org/10.1109/ATS.2015.26},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2015/ATS_KampmKSIHW2015.pdf}
}
244. Intermittent and Transient Fault Diagnosis on Sparse Code Signatures
Kochte, M., Dalirsani, A., Bernabei, A., Omana, M., Metra, C. and Wunderlich, H.-J.
Proceedings of the 24th IEEE Asian Test Symposium (ATS'15), Mumbai, India, 22-25 November 2015, pp. 157-162
2015
DOI PDF 
Keywords: Diagnosis, intermittent, transient, concurrent error detection, code signature, self-checking, online testing
Abstract: Failure diagnosis of field returns typically requires high quality test stimuli and assumes that tests can be repeated. For intermittent faults with fault activation conditions depending on the physical environment, the repetition of tests cannot ensure that the behavior in the field is also observed during diagnosis, causing field returns to be diagnosed as no-trouble-found. In safety critical applications, self-checking circuits, which provide concurrent error detection, are frequently used. To diagnose intermittent and transient faulty behavior in such circuits, we use the encoded circuit outputs stored in case of a failure (called signatures) for later analysis in diagnosis. For the first time, a diagnosis algorithm is presented that is capable of performing the classification of intermittent or transient faults using only the very limited amount of functional stimuli and signatures observed during operation and stored on chip. The experimental results demonstrate that even with these harsh limitations it is possible to distinguish intermittent from transient faulty behavior. This is essential to determine whether a circuit in which failures have been observed should be subject to later physical failure analysis, since intermittent faulty behavior has been diagnosed. In case of transient faulty behavior, it may still be operated reliably.
BibTeX:
@inproceedings{KochtDBOMW2015,
  author = {Kochte, Michael and Dalirsani, Atefe and Bernabei, Andrea and Omana, Martin and Metra, Cecilia and Wunderlich, Hans-Joachim},
  title = {{Intermittent and Transient Fault Diagnosis on Sparse Code Signatures}},
  booktitle = {Proceedings of the 24th IEEE Asian Test Symposium (ATS'15)},
  year = {2015},
  pages = {157-162},
  keywords = { Diagnosis, intermittent, transient, concurrent error detection, code signature, self-checking, online testing },
  abstract = {Failure diagnosis of field returns typically requires high quality test stimuli and assumes that tests can be repeated. For intermittent faults with fault activation conditions depending on the physical environment, the repetition of tests cannot ensure that the behavior in the field is also observed during diagnosis, causing field returns diagnosed as no-trouble-found. In safety critical applications, self-checking circuits, which provide concurrent error detection, are frequently used. To diagnose intermittent and transient faulty behavior in such circuits, we use the stored encoded circuit outputs in case of a failure (called signatures) for later analysis in diagnosis. For the first time, a diagnosis algorithm is presented that is capable of performing the classification of intermittent or transient faults using only the very limited amount of functional stimuli and signatures observed during operation and stored on chip. The experimental results demonstrate that even with these harsh limitations it is possible to distinguish intermittent from transient faulty behavior. This is essential to determine whether a circuit in which failures have been observed should be subject to later physical failure analysis, since intermittent faulty behavior has been diagnosed. In case of transient faulty behavior, it may still be operated reliably.},
  doi = {http://dx.doi.org/10.1109/ATS.2015.34},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2015/ATS_KochtDBOMW2015.pdf}
}
243. STRAP: Stress-Aware Placement for Aging Mitigation in Runtime Reconfigurable Architectures
Zhang, H., Kochte, M.A., Schneider, E., Bauer, L., Wunderlich, H.-J. and Henkel, J.
Proceedings of the 34th IEEE/ACM International Conference on Computer-Aided Design (ICCAD'15), Austin, Texas, USA, 2-6 November 2015, pp. 38-45
2015
URL PDF 
Abstract: Aging effects in nano-scale CMOS circuits impair the reliability and Mean Time to Failure (MTTF) of embedded systems. Especially for FPGAs that are manufactured in the latest technology node, aging is a major concern. We introduce the first cross-layer aging-aware placement method for accelerators in FPGA-based runtime reconfigurable architectures. It optimizes stress distribution by accelerator placement at runtime, i.e. to which reconfigurable region an accelerator shall be reconfigured. Additionally, it optimizes logic placement at synthesis time to diversify the resource usage of individual accelerators, i.e. which CLBs of a reconfigurable region shall be used by an accelerator. Both layers together balance the intra- and inter-region stress induced by the application workload at negligible performance cost. Experimental results show significant reduction of maximum stress of up to 64% and 35%, which leads to up to 177% and 14% MTTF improvement relative to state-of-the-art methods w.r.t. HCI and BTI aging, respectively.
BibTeX:
@inproceedings{ZhangKSBWH2015,
  author = {Zhang, Hongyan and Kochte, Michael A. and Schneider, Eric and Bauer, Lars and Wunderlich, Hans-Joachim and Henkel, Jörg},
  title = {{STRAP: Stress-Aware Placement for Aging Mitigation in Runtime Reconfigurable Architectures}},
  booktitle = {Proceedings of the 34th IEEE/ACM International Conference on Computer-Aided Design (ICCAD'15)},
  year = {2015},
  pages = {38-45},
  abstract = {Aging effects in nano-scale CMOS circuits impair the reliability and Mean Time to Failure (MTTF) of embedded systems. Especially for FPGAs that are manufactured in the latest technology node, aging is a major concern. We introduce the first cross-layer aging-aware placement method for accelerators in FPGA-based runtime reconfigurable architectures. It optimizes stress distribution by accelerator placement at runtime, i.e. to which reconfigurable region an accelerator shall be reconfigured. Additionally, it optimizes logic placement at synthesis time to diversify the resource usage of individual accelerators, i.e. which CLBs of a reconfigurable region shall be used by an accelerator. Both layers together balance the intra- and inter-region stress induced by the application workload at negligible performance cost. Experimental results show significant reduction of maximum stress of up to 64% and 35%, which leads to up to 177% and 14% MTTF improvement relative to state-of-the-art methods w.r.t. HCI and BTI aging, respectively.},
  url = { http://dl.acm.org/citation.cfm?id=2840825 },
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2015/ICCAD_ZhangKSBWH2015.pdf}
}
242. Low-Overhead Fault-Tolerance for the Preconditioned Conjugate Gradient Solver
Schöll, A., Braun, C., Kochte, M.A. and Wunderlich, H.-J.
Proceedings of the International Symposium on Defect and Fault Tolerance in VLSI and Nanotechnology Systems (DFT'15), Amherst, Massachusetts, USA, 12-14 October 2015, pp. 60-65
2015
DOI PDF 
Keywords: Fault Tolerance, Sparse Linear System Solving, Preconditioned Conjugate Gradient, ABFT
Abstract: Linear system solvers are an integral part of many compute-intensive applications, and they benefit from the compute power of heterogeneous computer architectures. However, the growing spectrum of reliability threats for such nano-scaled CMOS devices makes the integration of fault tolerance mandatory. The preconditioned conjugate gradient (PCG) method is one widely used solver as it typically finds solutions faster than direct methods. Although this iterative approach is able to tolerate certain errors, recent research shows that the PCG solver is still vulnerable to transient effects. Even single errors, for instance, caused by marginal hardware, harsh environments, or particle radiation, can considerably affect execution times, or lead to silent data corruption. In this work, a novel fault-tolerant PCG solver with extremely low runtime overhead is proposed. Since the error detection method does not involve expensive operations, it scales very well with increasing problem sizes. In case of errors, the method selects between three different correction methods according to the identified error. Experimental results show a runtime overhead for error detection ranging only from 0.04% to 1.70%.
BibTeX:
@inproceedings{SchoeBKW2015a,
  author = {Schöll, Alexander and Braun, Claus and Kochte, Michael A. and Wunderlich, Hans-Joachim},
  title = {{Low-Overhead Fault-Tolerance for the Preconditioned Conjugate Gradient Solver}},
  booktitle = {Proceedings of the International Symposium on Defect and Fault Tolerance in VLSI and Nanotechnology Systems (DFT'15)},
  year = {2015},
  pages = {60-65},
  keywords = { Fault Tolerance, Sparse Linear System Solving, Preconditioned Conjugate Gradient, ABFT },
  abstract = {Linear system solvers are an integral part for many different compute-intensive applications and they benefit from the compute power of heterogeneous computer architectures. However, the growing spectrum of reliability threats for such nano-scaled CMOS devices makes the integration of fault tolerance mandatory. The preconditioned conjugate gradient (PCG) method is one widely used solver as it finds solutions typically faster compared to direct methods. Although this iterative approach is able to tolerate certain errors, latest research shows that the PCG solver is still vulnerable to transient effects. Even single errors, for instance, caused by marginal hardware, harsh environments, or particle radiation, can considerably affect execution times, or lead to silent data corruption. In this work, a novel fault-tolerant PCG solver with extremely low runtime overhead is proposed. Since the error detection method does not involve expensive operations, it scales very well with increasing problem sizes. In case of errors, the method selects between three different correction methods according to the identified error. Experimental results show a runtime overhead for error detection ranging only from 0.04% to 1.70%. },
  doi = {http://dx.doi.org/10.1109/DFT.2015.7315136},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2015/DFTS_SchoeBKW2015.pdf}
}
241. Multi-Layer Test and Diagnosis for Dependable NoCs
Wunderlich, H.-J. and Radetzki, M.
Proceedings of the 9th IEEE/ACM International Symposium on Networks-on-Chip (NOCS'15), Vancouver, BC, Canada, 28-30 September 2015
2015
DOI PDF 
Keywords: Test, diagnosis, fault tolerance, network-on-chip, cross-layer
Abstract: Networks-on-chip are inherently fault tolerant or at least gracefully degradable, as both connectivity and the amount of resources provide some useful redundancy. These properties can only be exploited extensively if test and diagnosis techniques support fault detection and error containment in an optimized way. On the one hand, all faulty components have to be isolated, and on the other hand, remaining fault-free functionalities have to be kept operational.
In this contribution, behavioral end-to-end error detection is considered together with functional test methods for switches and gate level diagnosis to locate and to isolate faults in the network in an efficient way with low time overhead.
BibTeX:
@inproceedings{WundeR2015,
  author = {Wunderlich, Hans-Joachim and Radetzki, Martin},
  title = {{Multi-Layer Test and Diagnosis for Dependable NoCs}},
  booktitle = {Proceedings of the 9th IEEE/ACM International Symposium on Networks-on-Chip (NOCS'15)},
  year = {2015},
  keywords = { Test, diagnosis, fault tolerance, network-on-chip, cross-layer },
  abstract = {Networks-on-chip are inherently fault tolerant or at least gracefully degradable as both, connectivity and amount of resources, provide some useful redundancy. These properties can only be exploited extensively if test and diagnosis techniques support fault detection and error containment in an optimized way. On the one hand, all faulty components have to be isolated, and on the other hand, remaining fault-free functionalities have to be kept operational. 
In this contribution, behavioral end-to-end error detection is considered together with functional test methods for switches and gate level diagnosis to locate and to isolate faults in the network in an efficient way with low time overhead.},
  doi = {http://dx.doi.org/10.1145/2786572.2788708},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2015/NOCS_WundeR2015.pdf}
}
240. Efficient Observation Point Selection for Aging Monitoring
Liu, C., Kochte, M.A. and Wunderlich, H.-J.
Proceedings of the 21st IEEE International On-Line Testing Symposium (IOLTS'15), Elia, Halkidiki, Greece, 6-8 July 2015, pp. 176-181
2015
DOI PDF 
Keywords: Aging monitoring, delay monitoring, online test, concurrent test, stability checker, path selection
Abstract: Circuit aging causes a performance degradation and eventually a functional failure. It depends on the workload and the environmental condition of the system, which are hard to predict in early design phases, resulting in a pessimistic worst-case design. Existing delay monitoring schemes measure the remaining slack of paths in the circuit, but cause a significant hardware penalty including global wiring. More importantly, the low sensitization ratio of long paths in applications may lead to a very low measurement frequency or even an unmonitored timing violation. In this work, we propose a delay monitor placement method by analyzing the topological circuit structure and sensitization of paths. The delay monitors are inserted at meticulously selected positions in the circuit, named observation points (OPs). This OP monitor placement method can reduce the number of inserted monitors by up to 98% compared to a placement at the end of long paths. The experimental validation shows the effectiveness of this aging indication, i.e. a monitor always issues a timing alert earlier than any imminent timing failure.
BibTeX:
@inproceedings{LiuKW2015,
  author = {Liu, Chang and Kochte, Michael A. and Wunderlich, Hans-Joachim},
  title = {{Efficient Observation Point Selection for Aging Monitoring}},
  booktitle = {Proceedings of the 21st IEEE International On-Line Testing Symposium (IOLTS'15)},
  year = {2015},
  pages = {176--181},
  keywords = {Aging monitoring, delay monitoring, online test, concurrent test, stability checker, path selection},
  abstract = {Circuit aging causes a performance degradation and eventually a functional failure. It depends on the workload and the environmental condition of the system, which are hard to predict in early design phases resulting in pessimistic worst case design. Existing delay monitoring schemes measure the remaining slack of paths in the circuit, but cause a significant hardware penalty including global wiring. More importantly, the low sensitization ratio of long paths in applications may lead to a very low measurement frequency or even an unmonitored timing violation. In this work, we propose a delay monitor placement method by analyzing the topological circuit structure and sensitization of paths. The delay monitors are inserted at meticulously selected positions in the circuit, named observation points (OPs). This OP monitor placement method can reduce the number of inserted monitors by up to 98% compared to a placement at the end of long paths. The experimental validation shows the effectiveness of this aging indication, i.e. a monitor issues a timing alert always earlier than any imminent timing failure.},
  doi = {http://dx.doi.org/10.1109/IOLTS.2015.7229855},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2015/IOLTS_LiuKW2015.pdf}
}
239. Efficient On-Line Fault-Tolerance for the Preconditioned Conjugate Gradient Method
Schöll, A., Braun, C., Kochte, M.A. and Wunderlich, H.-J.
Proceedings of the 21st IEEE International On-Line Testing Symposium (IOLTS'15), Elia, Halkidiki, Greece, 6-8 July 2015, pp. 95-100
2015
DOI PDF 
Keywords: Sparse Linear System Solving, Fault Tolerance, Preconditioned Conjugate Gradient, ABFT
Abstract: Linear system solvers are key components of many scientific applications and they can benefit significantly from modern heterogeneous computer architectures. However, such nano-scaled CMOS devices face an increasing number of reliability threats, which make the integration of fault tolerance mandatory. The preconditioned conjugate gradient method (PCG) is a very popular solver since it typically finds solutions faster than direct methods, and it is less vulnerable to transient effects. However, as latest research shows, the vulnerability is still considerable. Even single errors caused, for instance, by marginal hardware, harsh operating conditions or particle radiation can increase execution times considerably or corrupt solutions without indication. In this work, a novel and highly efficient fault-tolerant PCG method is presented. The method applies only two inner products to reliably detect errors. In case of errors, the method automatically selects between roll-back and efficient on-line correction. This significantly reduces the error detection overhead and expensive re-computations.
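As background, a minimal sketch of one common low-overhead detection idea for Krylov solvers: periodically compare the recursively updated residual of (unpreconditioned, for brevity) CG against the explicitly computed residual b - Ax and trigger a roll-back or correction on mismatch. This generic check is not the specific two-inner-product criterion of the paper; the NumPy usage and all names are assumptions of the sketch.

import numpy as np

def cg_checked(A, b, iters=200, check_every=10, tol=1e-10):
    x = np.zeros_like(b)
    r = b - A.dot(x)  # recursively updated residual
    p = r.copy()
    rs = r.dot(r)
    for k in range(iters):
        Ap = A.dot(p)
        alpha = rs / p.dot(Ap)
        x += alpha * p
        r -= alpha * Ap
        if (k + 1) % check_every == 0:
            # Error detection: the recursive residual must match the true residual.
            if np.linalg.norm((b - A.dot(x)) - r) > tol * np.linalg.norm(b):
                raise RuntimeError("residual mismatch: silent error detected")
        rs_new = r.dot(r)
        if np.sqrt(rs_new) < tol:
            break
        p = r + (rs_new / rs) * p
        rs = rs_new
    return x

A = np.array([[4.0, 1.0], [1.0, 3.0]])
b = np.array([1.0, 2.0])
print(cg_checked(A, b))  # approx. [0.0909 0.6364]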
BibTeX:
@inproceedings{SchoeBKW2015,
  author = {Schöll, Alexander and Braun, Claus and Kochte, Michael A. and Wunderlich, Hans-Joachim},
  title = {{Efficient On-Line Fault-Tolerance for the Preconditioned Conjugate Gradient Method}},
  booktitle = {Proceedings of the 21st IEEE International On-Line Testing Symposium (IOLTS'15)},
  year = {2015},
  pages = {95--100},
  keywords = {Sparse Linear System Solving, Fault Tolerance, Preconditioned Conjugate Gradient, ABFT},
  abstract = {Linear system solvers are key components of many scientific applications and they can benefit significantly from modern heterogeneous computer architectures. However, such nano-scaled CMOS devices face an increasing number of reliability threats, which make the integration of fault tolerance mandatory. The preconditioned conjugate gradient method (PCG) is a very popular solver since it typically finds solutions faster than direct methods, and it is less vulnerable to transient effects. However, as latest research shows, the vulnerability is still considerable. Even single errors caused, for instance, by marginal hardware, harsh operating conditions or particle radiation can increase execution times considerably or corrupt solutions without indication. In this work, a novel and highly efficient fault-tolerant PCG method is presented. The method applies only two inner products to reliably detect errors. In case of errors, the method automatically selects between roll-back and efficient on-line correction. This significantly reduces the error detection overhead and expensive re-computations.},
  doi = {http://dx.doi.org/10.1109/IOLTS.2015.7229839},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2015/IOLTS_SchoeBKW2015.pdf}
}
238. Adaptive Multi-Layer Techniques for Increased System Dependability
Bauer, L., Henkel, J., Herkersdorf, A., Kochte, M.A., Kühn, J.M., Rosenstiel, W., Schweizer, T., Wallentowitz, S., Wenzel, V., Wild, T., Wunderlich, H.-J. and Zhang, H.
it - Information Technology
Vol. 57(3), 8 June 2015, pp. 149-158
2015
DOI PDF 
Keywords: Dependability, fault tolerance, graceful degradation, aging mitigation, online test and error detection, thermal management, multi-core architecture, reconfigurable architecture
Abstract: Achieving system-level dependability is a demanding task. The manifold requirements and dependability threats can no longer be statically addressed at individual abstraction layers. Instead, all components of future multi-processor systems-on-chip (MPSoCs) have to contribute to this common goal in an adaptive manner.
In this paper we target a generic heterogeneous MPSoC that combines general purpose processors along with dedicated application-specific hard-wired accelerators, fine-grained reconfigurable processors, and coarse-grained reconfigurable architectures. We present different reactive and proactive measures at the layers of the runtime system (online resource management), system architecture (global communication), micro architecture (individual tiles), and gate netlist (tile-internal circuits) to address dependability threats.
BibTeX:
@article{BauerHHKKRSWWWWZ2015,
  author = {Bauer, Lars and Henkel, Jörg and Herkersdorf, Andreas and Kochte, Michael A. and Kühn, Johannes M. and Rosenstiel, Wolfgang and Schweizer, Thomas and Wallentowitz, Stefan and Wenzel, Volker and Wild, Thomas and Wunderlich, Hans-Joachim and Zhang, Hongyan},
  title = {{Adaptive Multi-Layer Techniques for Increased System Dependability}},
  journal = {it - Information Technology},
  year = {2015},
  volume = {57},
  number = {3},
  pages = {149--158},
  keywords = {Dependability, fault tolerance, graceful degradation, aging mitigation, online test and error detection, thermal management, multi-core architecture, reconfigurable architecture},
  abstract = {Achieving system-level dependability is a demanding task. The manifold requirements and dependability threats can no longer be statically addressed at individual abstraction layers. Instead, all components of future multi-processor systems-on-chip (MPSoCs) have to contribute to this common goal in an adaptive manner.
In this paper we target a generic heterogeneous MPSoC that combines general purpose processors along with dedicated application-specific hard-wired accelerators, fine-grained reconfigurable processors, and coarse-grained reconfigurable architectures. We present different reactive and proactive measures at the layers of the runtime system (online resource management), system architecture (global communication), micro architecture (individual tiles), and gate netlist (tile-internal circuits) to address dependability threats.},
  doi = {http://dx.doi.org/10.1515/itit-2014-1082},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2015/ITIT_BauerHHKKRSWWWWZ2015.pdf}
}
237. Fine-Grained Access Management in Reconfigurable Scan Networks
Baranowski, R., Kochte, M.A. and Wunderlich, H.-J.
IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems (TCAD)
Vol. 34(6), June 2015, pp. 937-946
2015
DOI PDF 
Keywords: Debug and diagnosis, reconfigurable scan network, IJTAG, IEEE Std 1687, secure DFT, hardware security, instrument protection
Abstract: Modern VLSI designs incorporate a high amount of instrumentation that supports post-silicon validation and debug, volume test and diagnosis, as well as in-field system monitoring and maintenance. Reconfigurable scan architectures, as allowed by the novel IEEE Std 1149.1-2013 (JTAG) and IEEE Std 1687-2014 (IJTAG), emerge as a scalable mechanism for access to such on-chip instruments. While the on-chip instrumentation is crucial for meeting quality, dependability, and time-to-market goals, it is prone to abuse and threatens system safety and security. A secure access management method is mandatory to ensure that critical instruments are accessible to authorized entities only. This work presents a novel protection method for fine-grained access management in complex reconfigurable scan networks based on a challenge-response authentication protocol. The target scan network is extended with an authorization instrument and Secure Segment Insertion Bits (S²IB) that together control the accessibility of individual instruments. To the best of the authors’ knowledge, this is the first fine-grained access management scheme that scales well with the number of protected instruments and offers a high level of security. Compared with recent state-of-the-art techniques, this scheme is more favorable with respect to implementation cost, performance overhead, and provided security level.
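For illustration, a small software model of a generic challenge-response unlock is given below; the S²IB-based protection in the paper is a hardware scheme, and the HMAC construction, key handling, and all names here are assumptions of this sketch.

import hmac, hashlib, os

SECRET_KEY = b'device-unique-key'  # hypothetical per-chip secret

def issue_challenge():
    return os.urandom(16)  # nonce sent to the external tester

def compute_response(key, challenge, instrument_id):
    # The authorized tester proves knowledge of the key for one specific instrument.
    return hmac.new(key, challenge + instrument_id.encode(), hashlib.sha256).digest()

def grant_access(challenge, response, instrument_id):
    expected = compute_response(SECRET_KEY, challenge, instrument_id)
    return hmac.compare_digest(expected, response)  # unlock the scan segment only on a match

c = issue_challenge()
r = compute_response(SECRET_KEY, c, 'temp_sensor_0')
print(grant_access(c, r, 'temp_sensor_0'))  # True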
BibTeX:
@article{BaranKW2015a,
  author = {Baranowski, Rafal and Kochte, Michael A. and Wunderlich, Hans-Joachim},
  title = {{Fine-Grained Access Management in Reconfigurable Scan Networks}},
  journal = {IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems (TCAD)},
  year = {2015},
  volume = {34},
  number = {6},
  pages = {937--946},
  keywords = {Debug and diagnosis, reconfigurable scan network, IJTAG, IEEE Std 1687, secure DFT, hardware security, instrument protection},
  abstract = {Modern VLSI designs incorporate a high amount of instrumentation that supports post-silicon validation and debug, volume test and diagnosis, as well as in-field system monitoring and maintenance. Reconfigurable scan architectures, as allowed by the novel IEEE Std 1149.1-2013 (JTAG) and IEEE Std 1687-2014 (IJTAG), emerge as a scalable mechanism for access to such on-chip instruments. While the on-chip instrumentation is crucial for meeting quality, dependability, and time-to-market goals, it is prone to abuse and threatens system safety and security. A secure access management method is mandatory to ensure that critical instruments are accessible to authorized entities only. This work presents a novel protection method for fine-grained access management in complex reconfigurable scan networks based on a challenge-response authentication protocol. The target scan network is extended with an authorization instrument and Secure Segment Insertion Bits (S²IB) that together control the accessibility of individual instruments. To the best of the authors’ knowledge, this is the first fine-grained access management scheme that scales well with the number of protected instruments and offers a high level of security. Compared with recent state-of-the-art techniques, this scheme is more favorable with respect to implementation cost, performance overhead, and provided security level.},
  doi = {http://dx.doi.org/10.1109/TCAD.2015.2391266},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2015/TCAD_BaranKW2015.pdf}
}
236. High-Throughput Logic Timing Simulation on GPGPUs
Holst, S., Imhof, M.E. and Wunderlich, H.-J.
ACM Transactions on Design Automation of Electronic Systems (TODAES)
Vol. 20(3), Jun 2015, pp. 37:1-37:21
2015
DOI URL PDF 
Keywords: Verification, Performance, Gate-Level Simulation, General Purpose computing on Graphics Processing Unit (GP-GPU), Hazards, Parallel CAD, Pin-to-Pin Delay, Pulse-Filtering, Timing Simulation
Abstract: Many EDA tasks like test set characterization or the precise estimation of power consumption, power droop and temperature development, require a very large number of time-aware gate-level logic simulations. Until now, such characterizations have been feasible only for rather small designs or with reduced precision due to the high computational demands. The new simulation system presented here is able to accelerate such tasks by more than two orders of magnitude and provides for the first time fast and comprehensive timing simulations for industrial-sized designs. Hazards, pulse-filtering, and pin-to-pin delay are supported for the first time in a GPGPU accelerated simulator, and the system can easily be extended to even more realistic delay models and further applications. A sophisticated mapping with efficient memory utilization and access patterns as well as minimal synchronizations and control flow divergence is able to use the full potential of GPGPU architectures. To provide such a mapping, we combine for the first time the versatility of event-based timing simulation and multidimensional parallelism used in GPU-based gate-level simulators. The result is a throughput-optimized timing simulation algorithm, which runs many simulation instances in parallel and at the same time fully exploits gate-parallelism within the circuit.
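To make the notions of pin-to-pin delay and pulse filtering concrete, a minimal single-gate sketch follows; the simulator in the paper parallelizes this kind of waveform evaluation across gates and simulation instances on GPGPUs, and the scalar Python model and all names here are assumptions of this sketch.

def value_at(wave, t):
    # A waveform is a sorted list of toggle times; the initial value is 0.
    v = 0
    for toggle in wave:
        if toggle <= t:
            v ^= 1
        else:
            break
    return v

def and_gate(wave_a, wave_b, d_a, d_b, min_pulse):
    # Candidate output events: every input toggle shifted by its pin-to-pin delay.
    events = sorted({t + d_a for t in wave_a} | {t + d_b for t in wave_b})
    out, last = [], 0
    for t in events:
        v = value_at(wave_a, t - d_a) & value_at(wave_b, t - d_b)
        if v != last:
            out.append(t)
            last = v
    # Inertial pulse filtering: drop toggle pairs that lie closer together than min_pulse.
    filtered = []
    for t in out:
        if filtered and t - filtered[-1] < min_pulse:
            filtered.pop()
        else:
            filtered.append(t)
    return filtered

# Input a rises at t=1.0 and falls at t=1.2, input b is constantly 1 (rises at t=0.0):
print(and_gate([1.0, 1.2], [0.0], 0.3, 0.1, 0.5))  # [] -> the 0.2-wide output pulse is filtered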
BibTeX:
@article{HolstIW2015,
  author = {Holst, Stefan and Imhof, Michael E. and Wunderlich, Hans-Joachim},
  title = {{High-Throughput Logic Timing Simulation on GPGPUs}},
  journal = {ACM Transactions on Design Automation of Electronic Systems (TODAES)},
  year = {2015},
  volume = {20},
  number = {3},
  pages = {37:1--37:21},
  keywords = {Verification, Performance, Gate-Level Simulation, General Purpose computing on Graphics Processing Unit (GP-GPU), Hazards, Parallel CAD, Pin-to-Pin Delay, Pulse-Filtering, Timing Simulation},
  abstract = {Many EDA tasks like test set characterization or the precise estimation of power consumption, power droop and temperature development, require a very large number of time-aware gate-level logic simulations. Until now, such characterizations have been feasible only for rather small designs or with reduced precision due to the high computational demands. The new simulation system presented here is able to accelerate such tasks by more than two orders of magnitude and provides for the first time fast and comprehensive timing simulations for industrial-sized designs. Hazards, pulse-filtering, and pin-to-pin delay are supported for the first time in a GPGPU accelerated simulator, and the system can easily be extended to even more realistic delay models and further applications. A sophisticated mapping with efficient memory utilization and access patterns as well as minimal synchronizations and control flow divergence is able to use the full potential of GPGPU architectures. To provide such a mapping, we combine for the first time the versatility of event-based timing simulation and multidimensional parallelism used in GPU-based gate-level simulators. The result is a throughput-optimized timing simulation algorithm, which runs many simulation instances in parallel and at the same time fully exploits gate-parallelism within the circuit.},
  url = {http://dl.acm.org/citation.cfm?id=2714564},
  doi = {http://dx.doi.org/10.1145/2714564},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2015/TODAES_HolstIW2015.pdf}
}
235. On-Line Prediction of NBTI-induced Aging Rates
Baranowski, R., Firouzi, F., Kiamehr, S., Liu, C., Tahoori, M. and Wunderlich, H.-J.
Proceedings of the ACM/IEEE Conference on Design, Automation Test in Europe (DATE'15), Grenoble, France, 9-13 March 2015, pp. 589-592
2015
URL PDF 
Keywords: Representative critical gates, Workload monitoring, Aging prediction, NBTI
Abstract: Nanoscale technologies are increasingly susceptible to aging processes such as Negative-Bias Temperature Instability (NBTI) which undermine the reliability of VLSI systems. Existing monitoring techniques can detect the violation of safety margins and hence make the prediction of an imminent failure possible. However, since such techniques can only detect measurable degradation effects which appear after a relatively long period of system operation, they are not well suited to early aging prediction and proactive aging alleviation. This work presents a novel method for the monitoring of NBTI-induced degradation rate in digital circuits. It enables the timely adoption of proper mitigation techniques that reduce the impact of aging. The developed method employs machine learning techniques to find a small set of so-called Representative Critical Gates (RCG), the workload of which is correlated with the degradation of the entire circuit. The workload of RCGs is observed in hardware using so-called workload monitors. The output of the workload monitors is evaluated on-line to predict system degradation experienced within a configurable (short) period of time, e.g. a fraction of a second. Experimental results show that the developed monitors predict the degradation rate with an average error of only 1.6% at 4.2% area overhead.
BibTeX:
@inproceedings{BaranFKLWT2015,
  author = { Baranowski, Rafal and Firouzi, Farshad and Kiamehr, Saman and Liu, Chang and Tahoori, Mehdi and Wunderlich, Hans-Joachim },
  title = {{On-Line Prediction of NBTI-induced Aging Rates}},
  booktitle = {Proceedings of the ACM/IEEE Conference on Design, Automation Test in Europe (DATE'15)},
  year = {2015},
  pages = {589--592},
  keywords = {Representative critical gates, Workload monitoring, Aging prediction, NBTI},
  abstract = {Nanoscale technologies are increasingly susceptible to aging processes such as Negative-Bias Temperature Instability (NBTI) which undermine the reliability of VLSI systems. Existing monitoring techniques can detect the violation of safety margins and hence make the prediction of an imminent failure possible. However, since such techniques can only detect measurable degradation effects which appear after a relatively long period of system operation, they are not well suited to early aging prediction and proactive aging alleviation. This work presents a novel method for the monitoring of NBTI-induced degradation rate in digital circuits. It enables the timely adoption of proper mitigation techniques that reduce the impact of aging. The developed method employs machine learning techniques to find a small set of so called Representative Critical Gates (RCG), the workload of which is correlated with the degradation of the entire circuit. The workload of RCGs is observed in hardware using so called workload monitors. The output of the workload monitors is evaluated on-line to predict system degradation experienced within a configurable (short) period of time, e.g. a fraction of a second. Experimental results show that the developed monitors predict the degradation rate with an average error of only 1.6% at 4.2% area overhead.},
  url = { http://dl.acm.org/citation.cfm?id=2755886 },
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2015/DATE_BaranFKLTW2015.pdf}
}
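The workload-to-degradation mapping described in the abstract can be illustrated with a plain least-squares model. The sketch below is not taken from the paper; the gate workloads, degradation rates, and the linear model are invented assumptions that only show the general idea of predicting a circuit-level aging rate from a few monitored gates.

import numpy as np

# Hypothetical offline characterization data: each row holds the duty cycles
# observed at three monitored gates for one workload phase; y holds the
# corresponding circuit-level degradation rate from a reference aging model.
X = np.array([[0.10, 0.80, 0.35],
              [0.55, 0.40, 0.20],
              [0.90, 0.15, 0.60],
              [0.30, 0.70, 0.50]])
y = np.array([0.012, 0.009, 0.015, 0.011])

# Fit a linear predictor with an intercept term by ordinary least squares.
A = np.hstack([X, np.ones((X.shape[0], 1))])
coeff, _, _, _ = np.linalg.lstsq(A, y, rcond=None)

def predict_rate(workloads):
    # On-line use: map current monitor readings to a predicted degradation rate.
    return float(np.dot(np.append(workloads, 1.0), coeff))

print(predict_rate([0.45, 0.50, 0.30]))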
234. GPU-Accelerated Small Delay Fault Simulation
Schneider, E., Holst, S., Kochte, M.A., Wen, X. and Wunderlich, H.-J.
Proceedings of the ACM/IEEE Conference on Design, Automation Test in Europe (DATE'15), Grenoble, France, 9-13 March 2015, pp. 1174-1179
Best Paper Candidate
2015
URL PDF 
Abstract: The simulation of delay faults is an essential task in design validation and reliability assessment of circuits. Due to the high sensitivity of current nano-scale designs against smallest delay deviations, small delay faults recently became the focus of test research. Because of the subtle delay impact, traditional fault simulation approaches based on abstract timing models are not sufficient for representing small delay faults. Hence, timing accurate simulation approaches have to be utilized, which quickly become inapplicable for larger designs due to high computational requirements. In this work we present a waveform-accurate approach for fast high-throughput small delay fault simulation on Graphics Processing Units (GPUs). By exploiting parallelism from gates, faults and patterns, the proposed approach enables accurate exhaustive small delay fault simulation even for multi-million gate designs without fault dropping for the first time.
BibTeX:
@inproceedings{SchneHKWW2015,
  author = { Schneider, Eric and Holst, Stefan and Kochte, Michael A. and Wen, Xiaoqing and Wunderlich, Hans-Joachim },
  title = {{GPU-Accelerated Small Delay Fault Simulation}},
  booktitle = {Proceedings of the ACM/IEEE Conference on Design, Automation Test in Europe (DATE'15)},
  year = {2015},
  pages = {1174--1179},
  abstract = {The simulation of delay faults is an essential task in design validation and reliability assessment of circuits. Due to the high sensitivity of current nano-scale designs against smallest delay deviations, small delay faults recently became the focus of test research. Because of the subtle delay impact, traditional fault simulation approaches based on abstract timing models are not sufficient for representing small delay faults. Hence, timing accurate simulation approaches have to be utilized, which quickly become inapplicable for larger designs due to high computational requirements. In this work we present a waveform-accurate approach for fast high-throughput small delay fault simulation on Graphics Processing Units (GPUs). By exploiting parallelism from gates, faults and patterns, the proposed approach enables accurate exhaustive small delay fault simulation even for multi-million gate designs without fault dropping for the first time.},
  url = { http://dl.acm.org/citation.cfm?id=2757084 },
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2015/DATE_SchneHKWW2015.pdf}
}
233. Reconfigurable Scan Networks: Modeling, Verification, and Optimal Pattern Generation
Baranowski, R., Kochte, M.A. and Wunderlich, H.-J.
ACM Transactions on Design Automation of Electronic Systems (TODAES)
Vol. 20(2), February 2015, pp. 30:1-30:27
2015
DOI PDF 
Keywords: Algorithms, Verification, Performance
Abstract: Efficient access to on-chip instrumentation is a key requirement for post-silicon validation, test, debug, bringup, and diagnosis. Reconfigurable scan networks, as proposed by e.g. IEEE P1687 and IEEE Std 1149.1-2013, emerge as an effective and affordable means to cope with the increasing complexity of on-chip infrastructure. Reconfigurable scan networks are often hierarchical and may have complex structural and functional dependencies. Common approaches for scan verification based on static structural analysis and functional simulation are not sufficient to ensure correct operation of these types of architectures. To access an instrument in a reconfigurable scan network, a scan-in bit sequence must be generated according to the current state and structure of the network. Due to sequential and combinational dependencies, the access pattern generation process (pattern retargeting) poses a complex decision and optimization problem. This article presents the first generalized formal model that considers structural and functional dependencies of reconfigurable scan networks and is directly applicable to P1687-based and 1149.1-2013-based scan architectures. This model enables efficient formal verification of complex scan networks, as well as automatic generation of access patterns. The proposed pattern generation method supports concurrent access to multiple target scan registers (access merging) and generates short scan-in sequences.
BibTeX:
@article{BaranKW2015,
  author = {Baranowski, Rafal and Kochte, Michael A. and Wunderlich, Hans-Joachim},
  title = {{Reconfigurable Scan Networks: Modeling, Verification, and Optimal Pattern Generation}},
  journal = {ACM Transactions on Design Automation of Electronic Systems (TODAES)},
  year = {2015},
  volume = {20},
  number = {2},
  pages = {30:1--30:27},
  keywords = {Algorithms, Verification, Performance},
  abstract = {Efficient access to on-chip instrumentation is a key requirement for post-silicon validation, test, debug, bringup, and diagnosis. Reconfigurable scan networks, as proposed by e.g. IEEE P1687 and IEEE Std 1149.1-2013, emerge as an effective and affordable means to cope with the increasing complexity of on-chip infrastructure. Reconfigurable scan networks are often hierarchical and may have complex structural and functional dependencies. Common approaches for scan verification based on static structural analysis and functional simulation are not sufficient to ensure correct operation of these types of architectures. To access an instrument in a reconfigurable scan network, a scan-in bit sequence must be generated according to the current state and structure of the network. Due to sequential and combinational dependencies, the access pattern generation process (pattern retargeting) poses a complex decision and optimization problem. This article presents the first generalized formal model that considers structural and functional dependencies of reconfigurable scan networks and is directly applicable to P1687-based and 1149.1-2013-based scan architectures. This model enables efficient formal verification of complex scan networks, as well as automatic generation of access patterns. The proposed pattern generation method supports concurrent access to multiple target scan registers (access merging) and generates short scan-in sequences.},
  doi = {http://dx.doi.org/10.1145/2699863},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2015/TODAES_BaranKW2015.pdf}
}
232. Access Port Protection for Reconfigurable Scan Networks
Baranowski, R., Kochte, M.A. and Wunderlich, H.-J.
Journal of Electronic Testing: Theory and Applications (JETTA)
Vol. 30(6), December 2014, pp. 711-723
2014 JETTA-TTTC Best Paper Award
2014
DOI URL PDF 
Keywords: Debug and diagnosis, reconfigurable scan network, IJTAG, IEEE P1687, secure DFT, hardware security
Abstract: Scan infrastructures based on IEEE Std. 1149.1 (JTAG), 1500 (SECT), and P1687 (IJTAG) provide a cost-effective access mechanism for test, reconfiguration, and debugging purposes. The improved accessibility of on-chip instruments, however, poses a serious threat to system safety and security. While state-of-the-art protection methods for scan architectures compliant with JTAG and SECT are very effective, most of these techniques face scalability issues in reconfigurable scan networks allowed by the upcoming IJTAG standard. This paper describes a scalable solution for multilevel access management in reconfigurable scan networks. The access to protected instruments is restricted locally at the interface to the network. The access restriction is realized by a sequence filter that allows only a precomputed set of scan-in access sequences. This approach does not require any modification of the scan architecture and causes no access time penalty. Therefore, it is well suited for core-based designs with hard macros and 3D integrated circuits. Experimental results for complex reconfigurable scan networks show that the area overhead depends primarily on the number of allowed accesses, and is marginal even if this number exceeds the count of registers in the network.
BibTeX:
@article{BaranKW2014a,
  author = {Baranowski, Rafal and Kochte, Michael A. and Wunderlich, Hans-Joachim},
  title = {{Access Port Protection for Reconfigurable Scan Networks}},
  journal = {Journal of Electronic Testing: Theory and Applications (JETTA)},
  publisher = {Springer-Verlag},
  year = {2014},
  volume = {30},
  number = {6},
  pages = {711--723},
  keywords = {Debug and diagnosis, reconfigurable scan network, IJTAG, IEEE P1687, secure DFT, hardware security},
  abstract = {Scan infrastructures based on IEEE Std. 1149.1 (JTAG), 1500 (SECT), and P1687 (IJTAG) provide a cost-effective access mechanism for test, reconfiguration, and debugging purposes. The improved accessibility of on-chip instruments, however, poses a serious threat to system safety and security. While state-of-the-art protection methods for scan architectures compliant with JTAG and SECT are very effective, most of these techniques face scalability issues in reconfigurable scan networks allowed by the upcoming IJTAG standard. This paper describes a scalable solution for multilevel access management in reconfigurable scan networks. The access to protected instruments is restricted locally at the interface to the network. The access restriction is realized by a sequence filter that allows only a precomputed set of scan-in access sequences. This approach does not require any modification of the scan architecture and causes no access time penalty. Therefore, it is well suited for core-based designs with hard macros and 3D integrated circuits. Experimental results for complex reconfigurable scan networks show that the area overhead depends primarily on the number of allowed accesses, and is marginal even if this number exceeds the count of registers in the network.},
  url = { http://link.springer.com/article/10.1007/s10836-014-5484-2 },
  doi = {http://dx.doi.org/10.1007/s10836-014-5484-2},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2014/JETTA_BaranKW2014.pdf}
}
231. On Covering Structural Defects in NoCs by Functional Tests
Dalirsani, A., Hatami, N., Imhof, M.E., Eggenberger, M., Schley, G., Radetzki, M. and Wunderlich, H.-J.
Proceedings of the 23rd IEEE Asian Test Symposium (ATS'14), Hangzhou, China, 16-19 November 2014, pp. 87-92
2014
DOI PDF 
Keywords: Network-on-Chip (NoC), Functional Test, Functional Failure Modeling, Fault Classification, Boolean Satisfiability (SAT)
Abstract: Structural tests provide high defect coverage by considering the low-level circuit details. Functional test provides a faster test with reduced test patterns and does not imply additional hardware overhead. However, it lacks a quantitative measure of structural fault coverage. This paper fills this gap by presenting a satisfiability based method to generate functional test patterns while considering structural faults. The method targets NoC switches and links, and it is independent of the switch structure and the network topology. It can be applied for any structural fault type as it relies on a generalized structural fault model.
BibTeX:
@inproceedings{DalirHIESRW2014,
  author = {Dalirsani, Atefe and Hatami, Nadereh and Imhof, Michael E. and Eggenberger, Marcus and Schley, Gert and Radetzki, Martin and Wunderlich, Hans-Joachim},
  title = {{On Covering Structural Defects in NoCs by Functional Tests}},
  booktitle = {Proceedings of the 23rd IEEE Asian Test Symposium (ATS'14)},
  year = {2014},
  pages = {87--92},
  keywords = {Network-on-Chip (NoC), Functional Test, Functional Failure Modeling, Fault Classification, Boolean Satisfiability (SAT)},
  abstract = {Structural tests provide high defect coverage by considering the low-level circuit details. Functional test provides a faster test with reduced test patterns and does not imply additional hardware overhead. However, it lacks a quantitative measure of structural fault coverage. This paper fills this gap by presenting a satisfiability based method to generate functional test patterns while considering structural faults. The method targets NoC switches and links, and it is independent of the switch structure and the network topology. It can be applied for any structural fault type as it relies on a generalized structural fault model.},
  doi = {http://dx.doi.org/10.1109/ATS.2014.27},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2014/ATS_DalirHIESRW2014.pdf}
}
230. High Quality System Level Test and Diagnosis
Jutman, A., Sonza Reorda, M. and Wunderlich, H.-J.
Proceedings of the 23rd IEEE Asian Test Symposium (ATS'14), Hangzhou, China, 16-19 November 2014, pp. 298-305
2014
DOI PDF 
Keywords: System test, board test, diagnosis
Abstract: This survey introduces the common practices, current challenges and advanced techniques of high quality system level test and diagnosis. Specialized techniques and industrial standards of testing complex boards are introduced. The reuse for system test of design for test structures and test data developed at chip level is discussed, including the limitations and research challenges. Structural test methods have to be complemented by functional test methods. State-of-the-art and leading-edge research for functional testing will be covered.
BibTeX:
@inproceedings{JutmaSW2014,
  author = {Jutman, Artur and Sonza Reorda, Matteo and Wunderlich, Hans-Joachim},
  title = {{High Quality System Level Test and Diagnosis}},
  booktitle = {Proceedings of the 23rd IEEE Asian Test Symposium (ATS'14)},
  year = {2014},
  pages = {298--305},
  keywords = {System test, board test, diagnosis},
  abstract = {This survey introduces the common practices, current challenges and advanced techniques of high quality system level test and diagnosis. Specialized techniques and industrial standards of testing complex boards are introduced. The reuse for system test of design for test structures and test data developed at chip level is discussed, including the limitations and research challenges. Structural test methods have to be complemented by functional test methods. State-of-the-art and leading-edge research for functional testing will be covered.},
  doi = {http://dx.doi.org/10.1109/ATS.2014.62},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2014/ATS_JutmaSW2014.pdf}
}
229. Data-Parallel Simulation for Fast and Accurate Timing Validation of CMOS Circuits
Schneider, E., Holst, S., Wen, X. and Wunderlich, H.-J.
Proceedings of the 33rd IEEE/ACM International Conference on Computer-Aided Design (ICCAD'14), San Jose, California, USA, 3-6 November 2014, pp. 17-23
2014
URL PDF 
Abstract: Gate-level timing simulation of combinational CMOS circuits is the foundation of a whole array of important EDA tools such as timing analysis and power-estimation, but the demand for higher simulation accuracy drastically increases the runtime complexity of the algorithms. Data-parallel accelerators such as Graphics Processing Units (GPUs) provide vast amounts of computing performance to tackle this problem, but require careful attention to control-flow and memory access patterns. This paper proposes the novel High-Throughput Oriented Parallel Switch-level Simulator (HiTOPS), which is especially designed to take full advantage of GPUs and provides accurate timing simulation for multi-million gate designs at an unprecedented throughput. HiTOPS models timing at transistor granularity and supports all major timing-related effects found in CMOS including pattern-dependent delay, glitch filtering and transition ramps, while achieving speedups of up to two orders of magnitude compared to traditional gate-level simulators.
BibTeX:
@inproceedings{SchneHWW2014,
  author = {Schneider, Eric and Holst, Stefan and Wen, Xiaoqing and Wunderlich, Hans-Joachim},
  title = {{Data-Parallel Simulation for Fast and Accurate Timing Validation of CMOS Circuits}},
  booktitle = {Proceedings of the 33rd IEEE/ACM International Conference on Computer-Aided Design (ICCAD'14)},
  year = {2014},
  pages = {17--23},
  abstract = {Gate-level timing simulation of combinational CMOS circuits is the foundation of a whole array of important EDA tools such as timing analysis and power-estimation, but the demand for higher simulation accuracy drastically increases the runtime complexity of the algorithms. Data-parallel accelerators such as Graphics Processing Units (GPUs) provide vast amounts of computing performance to tackle this problem, but require careful attention to control-flow and memory access patterns. This paper proposes the novel High-Throughput Oriented Parallel Switch-level Simulator (HiTOPS), which is especially designed to take full advantage of GPUs and provides accurate timing simulation for multi-million gate designs at an unprecedented throughput. HiTOPS models timing at transistor granularity and supports all major timing-related effects found in CMOS including pattern-dependent delay, glitch filtering and transition ramps, while achieving speedups of up to two orders of magnitude compared to traditional gate-level simulators.},
  url = { http://dl.acm.org/citation.cfm?id=2691369 },
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2014/ICCAD_SchneHWW2014.pdf}
}
228. Adaptive Parallel Simulation of a Two-Timescale-Model for Apoptotic Receptor-Clustering on GPUs
Schöll, A., Braun, C., Daub, M., Schneider, G. and Wunderlich, H.-J.
Proceedings of the IEEE International Conference on Bioinformatics and Biomedicine (BIBM'14), Belfast, United Kingdom, 2-5 November 2014, pp. 424-431
SimTech Best Paper Award
2014
DOI PDF 
Keywords: Heterogeneous computing, GPU computing, parallel particle simulation, multi-timescale model, adaptive Euler-Maruyama approximation, ligand-receptor aggregation
Abstract: Computational biology contributes important solutions for major biological challenges. Unfortunately, most applications in computational biology are highly compute-intensive and associated with extensive computing times. Biological problems of interest are often not treatable with traditional simulation models on conventional multi-core CPU systems. This interdisciplinary work introduces a new multi-timescale simulation model for apoptotic receptor-clustering and a new parallel evaluation algorithm that exploits the computational performance of heterogeneous CPU-GPU computing systems. For this purpose, the different dynamics involved in receptor-clustering are separated and simulated on two timescales. Additionally, the time step sizes are adaptively refined on each timescale independently.
This new approach improves the simulation performance significantly and reduces computing times from months to hours for observation times of several seconds.
BibTeX:
@inproceedings{SchoeBDSW2014,
  author = {Schöll, Alexander and Braun, Claus and Daub, Markus and Schneider, Guido and Wunderlich, Hans-Joachim},
  title = {{Adaptive Parallel Simulation of a Two-Timescale-Model for Apoptotic Receptor-Clustering on GPUs}},
  booktitle = {Proceedings of the IEEE International Conference on Bioinformatics and Biomedicine (BIBM'14)},
  year = {2014},
  pages = {424--431},
  keywords = {Heterogeneous computing, GPU computing, parallel particle simulation, multi-timescale model, adaptive Euler-Maruyama approximation, ligand-receptor aggregation},
  abstract = {Computational biology contributes important solutions for major biological challenges. Unfortunately, most applications in computational biology are highly compute-intensive and associated with extensive computing times. Biological problems of interest are often not treatable with traditional simulation models on conventional multi-core CPU systems. This interdisciplinary work introduces a new multi-timescale simulation model for apoptotic receptor-clustering and a new parallel evaluation algorithm that exploits the computational performance of heterogeneous CPU-GPU computing systems. For this purpose, the different dynamics involved in receptor-clustering are separated and simulated on two timescales. Additionally, the time step sizes are adaptively refined on each timescale independently.
This new approach improves the simulation performance significantly and reduces computing times from months to hours for observation times of several seconds.},
  doi = {http://dx.doi.org/10.1109/BIBM.2014.6999195},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2014/BIBM_SchoeBDSW2014.pdf}
}
227. Test Pattern Generation in Presence of Unknown Values Based on Restricted Symbolic Logic
Erb, D., Scheibler, K., Kochte, M.A., Sauer, M., Wunderlich, H.-J. and Becker, B.
Proceedings of the IEEE International Test Conference (ITC'14), Seattle, Washington, USA, 20-23 October 2014, pp. 1-10
2014
DOI PDF 
Keywords: SAT, QBF, test generation, ATPG, Unknown values, Restricted symbolic logic
Abstract: Test generation algorithms based on standard n-valued logic algebras are pessimistic in presence of unknown (X) values, overestimate the number of signals with X-values and underestimate fault coverage. Recently, an ATPG algorithm based on quantified Boolean formula (QBF) has been presented, which is accurate in presence of X-values but has limits with respect to runtime, scalability and robustness. In this paper, we consider ATPG based on restricted symbolic logic (RSL) and demonstrate its potential. We introduce a complete RSL ATPG exploiting the full potential of RSL in ATPG. Experimental results demonstrate that RSL ATPG significantly increases fault coverage over classical algorithms and provides results very close to the accurate QBF-based algorithm. An optimized version of RSL ATPG (together with accurate fault simulation) is up to 618× faster than the QBF-based solution, more scalable and more robust.
BibTeX:
@inproceedings{ErbSKSWB2014,
  author = {Erb, Dominik and Scheibler, Karsten and Kochte, Michael A. and Sauer, Matthias and Wunderlich, Hans-Joachim and Becker, Bernd},
  title = {{Test Pattern Generation in Presence of Unknown Values Based on Restricted Symbolic Logic}},
  booktitle = {Proceedings of the IEEE International Test Conference (ITC'14)},
  year = {2014},
  pages = {1--10},
  keywords = {SAT, QBF, test generation, ATPG, Unknown values, Restricted symbolic logic},
  abstract = {Test generation algorithms based on standard n-valued logic algebras are pessimistic in presence of unknown (X) values, overestimate the number of signals with X-values and underestimate fault coverage. Recently, an ATPG algorithm based on quantified Boolean formula (QBF) has been presented, which is accurate in presence of X-values but has limits with respect to runtime, scalability and robustness. In this paper, we consider ATPG based on restricted symbolic logic (RSL) and demonstrate its potential. We introduce a complete RSL ATPG exploiting the full potential of RSL in ATPG. Experimental results demonstrate that RSL ATPG significantly increases fault coverage over classical algorithms and provides results very close to the accurate QBF-based algorithm. An optimized version of RSL ATPG (together with accurate fault simulation) is up to 618× faster than the QBF-based solution, more scalable and more robust.},
  doi = {http://dx.doi.org/10.1109/TEST.2014.7035350},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2014/ITC_ErbSKSWB2014.pdf}
}
226. FAST-BIST: Faster-than-At-Speed BIST Targeting Hidden Delay Defects
Hellebrand, S., Indlekofer, T., Kampmann, M., Kochte, M.A., Liu, C. and Wunderlich, H.-J.
Proceedings of the IEEE International Test Conference (ITC'14), Seattle, Washington, USA, 20-23 October 2014, pp. 1-8
2014
DOI PDF 
Abstract: Small delay faults may be an indicator of a reliability threat, even if they do not affect the system functionality yet. In recent years, Faster-than-at-Speed-Test (FAST) has become a feasible method to detect faults, which are hidden by the timing slack or by long critical paths in the combinational logic. FAST poses severe challenges to the automatic test equipment with respect to timing, performance, and resolution. In this paper, it is shown how logic built-in self-test (BIST) or embedded deterministic test can be used for an efficient FAST application. Running BIST just at a higher frequency is not an option, as outputs of long paths will receive undefined values due to setup time violations and destroy the content of the signature registers. Instead, for a given test pattern sequence, faults are classified according to the optimal detection frequency. For each class, a MISR-based compaction scheme is adapted, such that the critical bits to be observed can be determined by algebraic computations. Experiments show that rather a small number of intermediate signatures have to be evaluated to observe a large fraction of hidden delay faults testable by the given test sequence.
BibTeX:
@inproceedings{HelleIKKLW2014,
  author = {Hellebrand, Sybille and Indlekofer, Thomas and Kampmann, Matthias and Kochte, Michael A. and Liu, Chang and Wunderlich, Hans-Joachim},
  title = {{FAST-BIST: Faster-than-At-Speed BIST Targeting Hidden Delay Defects}},
  booktitle = {Proceedings of the IEEE International Test Conference (ITC'14)},
  year = {2014},
  pages = {1--8},
  abstract = {Small delay faults may be an indicator of a reliability threat, even if they do not affect the system functionality yet. In recent years, Faster-than-at-Speed-Test (FAST) has become a feasible method to detect faults, which are hidden by the timing slack or by long critical paths in the combinational logic. FAST poses severe challenges to the automatic test equipment with respect to timing, performance, and resolution. In this paper, it is shown how logic built-in self-test (BIST) or embedded deterministic test can be used for an efficient FAST application. Running BIST just at a higher frequency is not an option, as outputs of long paths will receive undefined values due to setup time violations and destroy the content of the signature registers. Instead, for a given test pattern sequence, faults are classified according to the optimal detection frequency. For each class, a MISR-based compaction scheme is adapted, such that the critical bits to be observed can be determined by algebraic computations. Experiments show that rather a small number of intermediate signatures have to be evaluated to observe a large fraction of hidden delay faults testable by the given test sequence.},
  doi = {http://dx.doi.org/10.1109/TEST.2014.7035360},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2014/ITC_HelleIKKLW2014.pdf}
}
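As background for the MISR-based compaction mentioned in the abstract, the following generic multiple-input signature register shows how test responses are folded into intermediate signatures. The feedback polynomial, word width, and response words are arbitrary example values and do not represent the FAST-BIST scheme itself.

def misr_signatures(responses, width=8, taps=0b10111000, seed=0):
    # Galois-style LFSR with each test-response word XORed into the state;
    # the signature after every pattern (the intermediate signatures) is kept.
    state = seed
    mask = (1 << width) - 1
    signatures = []
    for word in responses:
        feedback = state & 1
        state >>= 1
        if feedback:
            state ^= taps
        state = (state ^ word) & mask
        signatures.append(state)
    return signatures

print([hex(s) for s in misr_signatures([0x3A, 0xFF, 0x00, 0x5C])])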
225. Adaptive Bayesian Diagnosis of Intermittent Faults
Rodríguez Gómez, L., Cook, A., Indlekofer, T., Hellebrand, S. and Wunderlich, H.-J.
Journal of Electronic Testing: Theory and Applications (JETTA)
Vol. 30(5), 30 September 2014, pp. 527-540
2014
DOI URL PDF 
Keywords: Built-In Self-Test, Built-in diagnosis, Transient faults, Intermittent faults, Bayesian diagnosis
Abstract: With increasing transient error rates, distinguishing intermittent and transient faults is especially challenging. In addition to particle strikes relatively high transient error rates are observed in architectures for opportunistic computing and in technologies under high variations. This paper presents a method to classify faults into permanent, intermittent and transient faults based on some intermediate signatures during embedded test or built-in self-test.
Permanent faults are easily determined by repeating test sessions. Intermittent and transient faults can be identified by the amount of failing test sessions in many cases. For the remaining faults, a Bayesian classification technique has been developed which is applicable to large digital circuits. The combination of these methods is able to identify intermittent faults with a probability of more than 98 %.
BibTeX:
@article{RodriCIHW2014,
  author = {Rodríguez Gómez, Laura and Cook, Alejandro and Indlekofer, Thomas and Hellebrand, Sybille and Wunderlich, Hans-Joachim},
  title = {{Adaptive Bayesian Diagnosis of Intermittent Faults}},
  journal = {Journal of Electronic Testing: Theory and Applications (JETTA)},
  year = {2014},
  volume = {30},
  number = {5},
  pages = {527--540},
  keywords = { Built-In Self-Test, Built-in diagnosis, Transient faults, Intermittent faults, Bayesian diagnosis },
  abstract = { With increasing transient error rates, distinguishing intermittent and transient faults is especially challenging. In addition to particle strikes relatively high transient error rates are observed in architectures for opportunistic computing and in technologies under high variations. This paper presents a method to classify faults into permanent, intermittent and transient faults based on some intermediate signatures during embedded test or built-in self-test.
Permanent faults are easily determined by repeating test sessions. Intermittent and transient faults can be identified by the amount of failing test sessions in many cases. For the remaining faults, a Bayesian classification technique has been developed which is applicable to large digital circuits. The combination of these methods is able to identify intermittent faults with a probability of more than 98 %.},
  url = {http://link.springer.com/article/10.1007/s10836-014-5477-1},
  doi = {http://dx.doi.org/10.1007/s10836-014-5477-1},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2014/JETTA_RodriCIHW2014.pdf}
}
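To make the Bayesian classification idea concrete, the toy example below computes a posterior over fault classes from the number of failing test sessions. The per-session failure probabilities and the priors are purely illustrative assumptions, not values from the paper, and the adaptive part of the published method is omitted.

from math import comb

FAIL_PROB = {"transient": 0.05, "intermittent": 0.40, "permanent": 1.00}
PRIOR     = {"transient": 0.50, "intermittent": 0.40, "permanent": 0.10}

def posterior(failing, total):
    # Binomial likelihood of the observed number of failing sessions for each
    # fault class, combined with the prior and normalized.
    likelihood = {c: comb(total, failing) * p**failing * (1 - p)**(total - failing)
                  for c, p in FAIL_PROB.items()}
    joint = {c: likelihood[c] * PRIOR[c] for c in FAIL_PROB}
    norm = sum(joint.values())
    return {c: v / norm for c, v in joint.items()}

print(posterior(failing=6, total=20))   # dominated by 'intermittent'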
224. Multi-Level Simulation of Non-Functional Properties by Piecewise Evaluation
Hatami, N., Baranowski, R., Prinetto, P. and Wunderlich, H.-J.
ACM Transactions on Design Automation of Electronic Systems (TODAES)
Vol. 19(4), August 2014, pp. 37:1-37:21
2014
DOI PDF 
Keywords: Design, Verification, Reliability
Abstract: As the technology shrinks, nonfunctional properties (NFPs) such as reliability, vulnerability, power consumption, or heat dissipation become as important as system functionality. As NFPs often influence each other, depend on the application and workload of a system, and exhibit nonlinear behavior, NFP simulation over long periods of system operation is computationally expensive, if feasible at all. This article presents a piecewise evaluation method for efficient NFP simulation. Simulation time is divided into intervals called evaluation windows, within which the NFP models are partially linearized. High-speed functional system simulation is achieved by parallel execution of models at different levels of abstraction. A trade-off between simulation speed and accuracy is met by adjusting the size of the evaluation window. As an example, the piecewise evaluation technique is applied to analyze aging caused by two mechanisms, namely Negative Bias Temperature Instability (NBTI) and Hot Carrier Injection (HCI), in order to identify reliability hotspots. Experiments show that the proposed technique yields considerable simulation speedup at a marginal loss of accuracy.
BibTeX:
@article{HatamBPW2014,
  author = {Hatami, Nadereh and Baranowski, Rafal and Prinetto, Paolo and Wunderlich, Hans-Joachim},
  title = {{Multi-Level Simulation of Non-Functional Properties by Piecewise Evaluation}},
  journal = {ACM Transactions on Design Automation of Electronic Systems (TODAES)},
  year = {2014},
  volume = {19},
  number = {4},
  pages = {37:1--37:21},
  keywords = {Design, Verification, Reliability},
  abstract = {As the technology shrinks, nonfunctional properties (NFPs) such as reliability, vulnerability, power consumption, or heat dissipation become as important as system functionality. As NFPs often influence each other, depend on the application and workload of a system, and exhibit nonlinear behavior, NFP simulation over long periods of system operation is computationally expensive, if feasible at all. This article presents a piecewise evaluation method for efficient NFP simulation. Simulation time is divided into intervals called evaluation windows, within which the NFP models are partially linearized. High-speed functional system simulation is achieved by parallel execution of models at different levels of abstraction. A trade-off between simulation speed and accuracy is met by adjusting the size of the evaluation window. As an example, the piecewise evaluation technique is applied to analyze aging caused by two mechanisms, namely Negative Bias Temperature Instability (NBTI) and Hot Carrier Injection (HCI), in order to identify reliability hotspots. Experiments show that the proposed technique yields considerable simulation speedup at a marginal loss of accuracy.},
  doi = {http://dx.doi.org/10.1145/2647955},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2014/TODAES_HatamBPW2014.pdf}
}
223. SAT-Based ATPG beyond Stuck-at Fault Testing
Hellebrand, S. and Wunderlich, H.-J.
it - Information Technology
Vol. 56(4), 21 July 2014, pp. 165-172
2014
DOI PDF 
Keywords: ACM CCS→Hardware→Hardware test, SAT-based ATPG, Fault Tolerance, Self-Checking Circuits, Synthesis
Abstract: To cope with the problems of technology scaling, a robust design has become desirable. Self-checking circuits combined with rollback or repair strategies can provide a low cost solution for many applications. However, standard synthesis procedures may violate design constraints or lead to sub-optimal designs. The SAT-based strategies for the verification and synthesis of self-checking circuits presented in this paper can provide efficient solutions.
BibTeX:
@article{HelleW2014,
  author = {Hellebrand, Sybille and Wunderlich, Hans-Joachim},
  title = {{SAT-Based ATPG beyond Stuck-at Fault Testing}},
  journal = {it - Information Technology},
  year = {2014},
  volume = {56},
  number = {4},
  pages = {165--172},
  keywords = {ACM CCS→Hardware→Hardware test, SAT-based ATPG, Fault Tolerance, Self-Checking Circuits, Synthesis},
  abstract = {To cope with the problems of technology scaling, a robust design has become desirable. Self-checking circuits combined with rollback or repair strategies can provide a low cost solution for many applications. However, standard synthesis procedures may violate design constraints or lead to sub-optimal designs. The SAT-based strategies for the verification and synthesis of self-checking circuits presented in this paper can provide efficient solutions.},
  doi = {http://dx.doi.org/10.1515/itit-2013-1043},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2014/ITIT_HelleW2014.pdf}
}
222. Area-Efficient Synthesis of Fault-Secure NoC Switches
Dalirsani, A., Kochte, M.A. and Wunderlich, H.-J.
Proceedings of the 20th IEEE International On-Line Testing Symposium (IOLTS'14), Platja d'Aro, Catalunya, Spain, 7-9 July 2014, pp. 13-18
2014
DOI PDF 
Keywords: Network-on-Chip, self-checking, fault-secure, online testing, concurrent error detection
Abstract: This paper introduces a hybrid method to synthesize area-efficient fault-secure NoC switches to detect all errors resulting from any single-point combinational or transition fault in switches and interconnect links. Firstly, the structural faults that are always detectable by data encoding at flit-level are identified. Next, the fault-secure structure is constructed with minimized area such that errors caused by the remaining faults are detected under any given input vector. The experimental evaluation shows significant area savings compared to conventional fault-secure schemes. In addition, the resulting structure can be reused for test compaction. This reduces the amount of test response data and test time without loss of fault coverage or diagnostic resolution.
BibTeX:
@inproceedings{DalirKW2014,
  author = {Dalirsani, Atefe and Kochte, Michael A. and Wunderlich, Hans-Joachim},
  title = {{Area-Efficient Synthesis of Fault-Secure NoC Switches}},
  booktitle = {Proceedings of the 20th IEEE International On-Line Testing Symposium (IOLTS'14)},
  year = {2014},
  pages = {13--18},
  keywords = {Network-on-Chip, self-checking, fault-secure, online testing, concurrent error detection},
  abstract = {This paper introduces a hybrid method to synthesize area-efficient fault-secure NoC switches to detect all errors resulting from any single-point combinational or transition fault in switches and interconnect links. Firstly, the structural faults that are always detectable by data encoding at flit-level are identified. Next, the fault-secure structure is constructed with minimized area such that errors caused by the remaining faults are detected under any given input vector. The experimental evaluation shows significant area savings compared to conventional fault-secure schemes. In addition, the resulting structure can be reused for test compaction. This reduces the amount of test response data and test time without loss of fault coverage or diagnostic resolution.},
  doi = {http://dx.doi.org/10.1109/IOLTS.2014.6873662},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2014/IOLTS_DalirKW2014.pdf}
}
221. A-ABFT: Autonomous Algorithm-Based Fault Tolerance for Matrix Multiplications on Graphics Processing Units
Braun, C., Halder, S. and Wunderlich, H.-J.
Proceedings of the 44th Annual IEEE/IFIP International Conference on Dependable Systems and Networks (DSN'14), Atlanta, Georgia, USA, 23-26 June 2014, pp. 443-454
2014
DOI PDF 
Keywords: Algorithm-Based Fault Tolerance, Rounding Error Estimation, GPU, Matrix Multiplication
Abstract: Graphics processing units (GPUs) enable large-scale scientific applications and simulations on the desktop. To allow scientific computing on GPUs with high performance and reliability requirements, the application of software-based fault tolerance is attractive. Algorithm-Based Fault Tolerance (ABFT) protects important scientific operations like matrix multiplications. However, the application to floating-point operations necessitates the runtime classification of errors into inevitable rounding errors, allowed compute errors in the magnitude of such rounding errors, and into critical errors that are larger than those and not tolerable. Hence, an ABFT scheme needs suitable rounding error bounds to detect errors reliably. The determination of such error bounds is a highly challenging task, especially since it has to be integrated tightly into the algorithm and executed autonomously with low performance overhead.
In this work, A-ABFT for matrix multiplications on GPUs is introduced, which is a new, parallel ABFT scheme that determines rounding error bounds autonomously at runtime with low performance overhead and high error coverage.
BibTeX:
@inproceedings{BraunHW2014,
  author = {Braun, Claus and Halder, Sebastian and Wunderlich, Hans-Joachim},
  title = {{A-ABFT: Autonomous Algorithm-Based Fault Tolerance for Matrix Multiplications on Graphics Processing Units}},
  booktitle = {Proceedings of the 44th Annual IEEE/IFIP International Conference on Dependable Systems and Networks (DSN'14)},
  year = {2014},
  pages = {443--454},
  keywords = {Algorithm-Based Fault Tolerance, Rounding Error Estimation, GPU, Matrix Multiplication },
  abstract = {Graphics processing units (GPUs) enable large-scale scientific applications and simulations on the desktop. To allow scientific computing on GPUs with high performance and reliability requirements, the application of software-based fault tolerance is attractive. Algorithm-Based Fault Tolerance (ABFT) protects important scientific operations like matrix multiplications. However, the application to floating-point operations necessitates the runtime classification of errors into inevitable rounding errors, allowed compute errors in the magnitude of such rounding errors, and into critical errors that are larger than those and not tolerable. Hence, an ABFT scheme needs suitable rounding error bounds to detect errors reliably. The determination of such error bounds is a highly challenging task, especially since it has to be integrated tightly into the algorithm and executed autonomously with low performance overhead.
In this work, A-ABFT for matrix multiplications on GPUs is introduced, which is a new, parallel ABFT scheme that determines rounding error bounds autonomously at runtime with low performance overhead and high error coverage.},
  doi = {http://dx.doi.org/10.1109/DSN.2014.48},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2014/DSN_BraunHH2014.pdf}
}
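For readers unfamiliar with ABFT, the sketch below shows the classic checksum scheme for matrix multiplication in NumPy. The fixed tolerance is a stand-in assumption for the rounding-error bounds that A-ABFT derives autonomously at runtime; it is not the paper's bound computation.

import numpy as np

def abft_matmul(A, B, tol=1e-9):
    # Classic checksum-based ABFT: append a column-checksum row to A and a
    # row-checksum column to B, multiply, then compare the checksums of the
    # result against the data part within a rounding-error tolerance.
    m, n = A.shape
    _, p = B.shape
    Ac = np.vstack([A, A.sum(axis=0)])
    Br = np.hstack([B, B.sum(axis=1, keepdims=True)])
    Cf = Ac @ Br
    C = Cf[:m, :p]
    row_ok = np.allclose(Cf[m, :p], C.sum(axis=0), atol=tol)
    col_ok = np.allclose(Cf[:m, p], C.sum(axis=1), atol=tol)
    return C, (row_ok and col_ok)

A = np.random.rand(4, 3)
B = np.random.rand(3, 5)
C, ok = abft_matmul(A, B)
print(ok)   # True for an error-free multiplication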
220. A New Hybrid Fault-Tolerant Architecture for Digital CMOS Circuits and Systems
Tran, D.A., Virazel, A., Bosio, A., Dilillo, L., Girard, P., Pravossoudovich, S. and Wunderlich, H.-J.
Journal of Electronic Testing: Theory and Applications (JETTA)
Vol. 30(4), 8 June 2014, pp. 401-413
2014
DOI URL PDF 
Keywords: Fault tolerance, Hard error, Soft error, Timing error
Abstract: This paper presents a new hybrid fault-tolerant architecture for robustness improvement of digital CMOS circuits and systems. It targets all kinds of errors in combinational part of logic circuits and thus, can be combined with advanced SEU protection techniques for sequential elements while reducing the power consumption. The proposed architecture combines different types of redundancies: information redundancy for error detection, temporal redundancy for soft error correction and hardware redundancy for hard error correction. Moreover, it uses a pseudo-dynamic comparator for SET and timing errors detection. Besides, the proposed method also aims to reduce power consumption of fault-tolerant architectures while keeping a comparable area overhead compared to existing solutions. Results on the largest ISCAS'85 and ITC'99 benchmark circuits show that our approach has an area cost of about 3 % to 6 % with a power consumption saving of about 33 % compared to TMR architectures.
BibTeX:
@article{TranVBDGPW2014,
  author = {Tran, Duc A. and Virazel, Arnaud and Bosio, Alberto and Dilillo, Luigi and Girard, Patrick and Pravossoudovich, Serge and Wunderlich, Hans-Joachim},
  title = {{A New Hybrid Fault-Tolerant Architecture for Digital CMOS Circuits and Systems}},
  journal = {Journal of Electronic Testing: Theory and Applications (JETTA)},
  year = {2014},
  volume = {30},
  number = {4},
  pages = {401--413},
  keywords = {Fault tolerance, Hard error, Soft error, Timing error},
  abstract = {This paper presents a new hybrid fault-tolerant architecture for robustness improvement of digital CMOS circuits and systems. It targets all kinds of errors in combinational part of logic circuits and thus, can be combined with advanced SEU protection techniques for sequential elements while reducing the power consumption. The proposed architecture combines different types of redundancies: information redundancy for error detection, temporal redundancy for soft error correction and hardware redundancy for hard error correction. Moreover, it uses a pseudo-dynamic comparator for SET and timing errors detection. Besides, the proposed method also aims to reduce power consumption of fault-tolerant architectures while keeping a comparable area overhead compared to existing solutions. Results on the largest ISCAS'85 and ITC'99 benchmark circuits show that our approach has an area cost of about 3 % to 6 % with a power consumption saving of about 33 % compared to TMR architectures.},
  url = {http://link.springer.com/article/10.1007/s10836-014-5459-3},
  doi = {http://dx.doi.org/10.1007/s10836-014-5459-3},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2014/JETTA_TranVBDGPW2014.pdf}
}
219. Advanced Diagnosis: SBST and BIST Integration in Automotive E/E Architectures
Reimann, F., Glaß, M., Teich, J., Cook, A., Rodríguez Gómez, L., Ull, D., Wunderlich, H.-J., Abelein, U. and Engelke, P.
Proceedings of the 51st ACM/IEEE Design Automation Conference (DAC'14), San Francisco, California, USA, 1-5 June 2014, pp. 1-9
HiPEAC Paper Award
2014
DOI PDF 
Abstract: The constantly growing amount of semiconductors in automotive systems increases the number of possible defect mechanisms, and therefore raises also the effort to maintain a sufficient level of quality and reliability. A promising solution to this problem is the on-line application of structural tests in key components, typically ECUs. In this work, an approach for the optimized integration of both Software-Based Self-Tests (SBST) and Built-In Self-Tests (BIST) into E/E architectures is presented. The approach integrates the execution of the tests non-intrusively, i. e., it (a) does not affect functional applications and (b) does not require costly changes in the communication schedules or additional communication overhead. Via design space exploration, optimized implementations with respect to multiple conflicting objectives, i. e., monetary costs, safety, test quality, and required execution time are derived.
BibTeX:
@inproceedings{ReimaGTCRUWAE2014,
  author = {Reimann, Felix and Glaß, Michael and Teich, Jürgen and Cook, Alejandro and Rodríguez Gómez, Laura and Ull, Dominik and Wunderlich, Hans-Joachim and Abelein, Ulrich and Engelke, Piet},
  title = {{Advanced Diagnosis: SBST and BIST Integration in Automotive E/E Architectures}},
  booktitle = {Proceedings of the 51st ACM/IEEE Design Automation Conference (DAC'14)},
  year = {2014},
  pages = {1--9},
  abstract = {The constantly growing amount of semiconductors in automotive systems increases the number of possible defect mechanisms, and therefore raises also the effort to maintain a sufficient level of quality and reliability. A promising solution to this problem is the on-line application of structural tests in key components, typically ECUs. In this work, an approach for the optimized integration of both Software-Based Self-Tests (SBST) and Built-In Self-Tests (BIST) into E/E architectures is presented. The approach integrates the execution of the tests non-intrusively, i. e., it (a) does not affect functional applications and (b) does not require costly changes in the communication schedules or additional communication overhead. Via design space exploration, optimized implementations with respect to multiple conflicting objectives, i. e., monetary costs, safety, test quality, and required execution time are derived.},
  doi = {http://dx.doi.org/10.1145/2593069.2602971},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2014/DAC_ReimaGTCRUWAE2014.pdf}
}
218. GUARD: GUAranteed Reliability in Dynamically Reconfigurable Systems
Zhang, H., Kochte, M.A., Imhof, M.E., Bauer, L., Wunderlich, H.-J. and Henkel, J.
Proceedings of the 51st ACM/EDAC/IEEE Design Automation Conference (DAC'14), San Francisco, California, USA, 1-5 June 2014, pp. 1-6
HiPEAC Paper Award
2014
DOI PDF 
Abstract: Soft errors are a reliability threat for reconfigurable systems implemented with SRAM-based FPGAs. They can be handled through fault tolerance techniques like scrubbing and modular redundancy. However, selecting these techniques statically at design or compile time tends to be pessimistic and prohibits optimal adaptation to changing soft error rate at runtime.
We present the GUARD method which allows for autonomous runtime reliability management in reconfigurable architectures: Based on the error rate observed during runtime, the runtime system dynamically determines whether a computation should be executed by a hardened processor, or whether it should be accelerated by inherently less reliable reconfigurable hardware which can trade-off performance and reliability. GUARD is the first runtime system for reconfigurable architectures that guarantees a target reliability while optimizing the performance. This allows applications to dynamically choose the desired degree of reliability. Compared to related work with statically optimized fault tolerance techniques, GUARD provides up to 68.3% higher performance at the same target reliability.
BibTeX:
@inproceedings{ZhangKIBWH2014,
  author = {Zhang, Hongyan and Kochte, Michael A. and Imhof, Michael E. and Bauer, Lars and Wunderlich, Hans-Joachim and Henkel, Jörg},
  title = {{GUARD: GUAranteed Reliability in Dynamically Reconfigurable Systems}},
  booktitle = {Proceedings of the 51st ACM/EDAC/IEEE Design Automation Conference (DAC'14)},
  year = {2014},
  pages = {1--6},
  abstract = {Soft errors are a reliability threat for reconfigurable systems implemented with SRAM-based FPGAs. They can be handled through fault tolerance techniques like scrubbing and modular redundancy. However, selecting these techniques statically at design or compile time tends to be pessimistic and prohibits optimal adaptation to changing soft error rate at runtime.
We present the GUARD method which allows for autonomous runtime reliability management in reconfigurable architectures: Based on the error rate observed during runtime, the runtime system dynamically determines whether a computation should be executed by a hardened processor, or whether it should be accelerated by inherently less reliable reconfigurable hardware which can trade-off performance and reliability. GUARD is the first runtime system for reconfigurable architectures that guarantees a target reliability while optimizing the performance. This allows applications to dynamically choose the desired degree of reliability. Compared to related work with statically optimized fault tolerance techniques, GUARD provides up to 68.3% higher performance at the same target reliability.},
  doi = {http://dx.doi.org/10.1145/2593069.2593146},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2014/DAC_ZhangKIBWH2014.pdf}
}
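The flavor of such a runtime decision can be sketched as follows; the Poisson upset model, parameter names, and numbers are assumptions made for this illustration and do not reproduce the GUARD algorithm.

import math

def choose_execution(upset_rate_per_bit_s, exposed_config_bits, exec_time_s,
                     target_reliability=0.999999):
    # Probability that no configuration bit of the accelerator is upset during
    # one execution, assuming independent upsets (Poisson process).
    p_ok = math.exp(-upset_rate_per_bit_s * exposed_config_bits * exec_time_s)
    return "reconfigurable fabric" if p_ok >= target_reliability else "hardened core"

print(choose_execution(upset_rate_per_bit_s=1e-12,
                       exposed_config_bits=2e6, exec_time_s=0.01))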
217. Exact Logic and Fault Simulation in Presence of Unknowns
Erb, D., Kochte, M.A., Sauer, M., Hillebrecht, S., Schubert, T., Wunderlich, H.-J. and Becker, B.
ACM Transactions on Design Automation of Electronic Systems (TODAES)
Vol. 19(3), June 2014, pp. 28:1-28:17
2014
DOI PDF 
Keywords: Algorithms, Reliability, Unknown values, simulation pessimism, exact logic simulation, exact fault simulation, SAT
Abstract: Logic and fault simulation are essential techniques in electronic design automation. The accuracy of standard simulation algorithms is compromised by unknown or X-values. This results in a pessimistic overestimation of X-valued signals in the circuit and a pessimistic underestimation of fault coverage.
This work proposes efficient algorithms for combinational and sequential logic as well as for stuck-at and transition-delay fault simulation that are free of any simulation pessimism in presence of unknowns. The SAT-based algorithms exactly classify all signal states. During fault simulation, each fault is accurately classified as either undetected, definitely detected, or possibly detected.
The pessimism with respect to unknowns present in classic algorithms is thoroughly investigated in the experimental results on benchmark circuits. The applicability of the proposed algorithms is demonstrated on larger industrial circuits. The results show that, by accurate analysis, the number of detected faults can be significantly increased without increasing the test-set size.
BibTeX:
@article{ErbKSHSWB2014,
  author = {Erb, Dominik and Kochte, Michael A. and Sauer, Matthias and Hillebrecht, Stefan and Schubert, Tobias and Wunderlich, Hans-Joachim and Becker, Bernd},
  title = {{Exact Logic and Fault Simulation in Presence of Unknowns}},
  journal = {ACM Transactions on Design Automation of Electronic Systems (TODAES)},
  year = {2014},
  volume = {19},
  number = {3},
  pages = {28:1--28:17},
  keywords = {Algorithms, Reliability, Unknown values, simulation pessimism, exact logic simulation, exact fault simulation, SAT},
  abstract = {Logic and fault simulation are essential techniques in electronic design automation. The accuracy of standard simulation algorithms is compromised by unknown or X-values. This results in a pessimistic overestimation of X-valued signals in the circuit and a pessimistic underestimation of fault coverage.
This work proposes efficient algorithms for combinational and sequential logic as well as for stuck-at and transition-delay fault simulation that are free of any simulation pessimism in presence of unknowns. The SAT-based algorithms exactly classify all signal states. During fault simulation, each fault is accurately classified as either undetected, definitely detected, or possibly detected.
The pessimism with respect to unknowns present in classic algorithms is thoroughly investigated in the experimental results on benchmark circuits. The applicability of the proposed algorithms is demonstrated on larger industrial circuits. The results show that, by accurate analysis, the number of detected faults can be significantly increased without increasing the test-set size.},
  doi = {http://dx.doi.org/10.1145/2611760},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2014/TODAES_ErbKSHSWB2014.pdf}
}
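A tiny example of the simulation pessimism addressed above (the circuit and the code are illustrative, not from the article): three-valued simulation reports X for y = a XOR a when a is unknown, whereas enumerating both values of the unknown, which is the essence of exact analysis, shows that y is always 0.

X = "X"

def xor3(a, b):
    # Pessimistic three-valued XOR: any unknown input yields an unknown output.
    if a == X or b == X:
        return X
    return a ^ b

def exact_values(expr, n_unknowns):
    # Enumerate all assignments of the unknowns and collect the possible outputs.
    outcomes = set()
    for bits in range(2 ** n_unknowns):
        assignment = [(bits >> i) & 1 for i in range(n_unknowns)]
        outcomes.add(expr(assignment))
    return outcomes

print(xor3(X, X))                               # 'X'  (pessimistic)
print(exact_values(lambda v: v[0] ^ v[0], 1))   # {0}  (exact)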
216. Resilience Articulation Point (RAP): Cross-layer Dependability Modeling for Nanometer System-on-chip Resilience
Herkersdorf, A., Aliee, H., Engel, M., Glaß, M., Gimmler-Dumont, C., Henkel, J., Kleeberger, V.B., Kochte, M.A., Kühn, J.M., Mueller-Gritschneder, D., Nassif, S.R., Rauchfuss, H., Rosenstiel, W., Schlichtmann, U., Shafique, M., Tahoori, M.B., Teich, J., Wehn, N., Weis, C. and Wunderlich, H.-J.
Elsevier Microelectronics Reliability Journal
Vol. 54(6-7), June-July 2014, pp. 1066-1074
2014
DOI PDF 
Keywords: Cross-layer SoC resilience, probabilistic dependability modeling, SRAM error models, critical charge, transient soft errors, permanent aging defects, error abstraction, error transformation, system-level failure analysis, resilience articulation point
Abstract: The Resilience Articulation Point (RAP) model aims at provisioning researchers and developers with a probabilistic fault abstraction and error propagation framework covering all hardware/software layers of a System on Chip. RAP assumes that physically induced faults at the technology or CMOS device layer will eventually manifest themselves as a single or multiple bit flip(s). When probabilistic error functions for specific fault origins are known at the bit or signal level, knowledge about the unit of design and its environment allow the transformation of the bit-related error functions into characteristic higher layer representations, such as error functions for data words, Finite State Machine (FSM) state, macro-interfaces or software variables. Thus, design concerns at higher abstraction layers can be investigated without the necessity to further consider the full details of lower levels of design. This paper introduces the ideas of RAP based on examples of radiation induced soft errors in SRAM cells, voltage variations and sequential CMOS logic. It shows by example how probabilistic bit flips are systematically abstracted and propagated towards higher abstraction levels up to the application software layer, and how RAP can be used to parameterize architecture-level resilience methods.
BibTeX:
@article{HerkeAEGGHKKKMNRRSSTTWWW2014,
  author = {Herkersdorf, Andreas and Aliee, Hananeh and Engel, Michael and Glaß, Michael and Gimmler-Dumont, Christina and Henkel, Jörg and Kleeberger, Veit B. and Kochte, Michael A. and Kühn, Johannes M. and Mueller-Gritschneder, Daniel and Nassif, Sani R. and Rauchfuss, Holm and Rosenstiel, Wolfgang and Schlichtmann, Ulf and Shafique, Muhammad and Tahoori, Mehdi B. and Teich, Jürgen and Wehn, Norbert and Weis, Christian and Wunderlich, Hans-Joachim },
  title = {{Resilience Articulation Point (RAP): Cross-layer Dependability Modeling for Nanometer System-on-chip Resilience}},
  journal = {Elsevier Microelectronics Reliability Journal},
  year = {2014},
  volume = {54},
  number = {6--7},
  pages = {1066--1074},
  keywords = {Cross-layer SoC resilience, probabilistic dependability modeling, SRAM error models, critical charge, transient soft errors, permanent aging defects, error abstraction, error transformation, system-level failure analysis, resilience articulation point},
  abstract = {The Resilience Articulation Point (RAP) model aims at providing researchers and developers with a probabilistic fault abstraction and error propagation framework covering all hardware/software layers of a System on Chip. RAP assumes that physically induced faults at the technology or CMOS device layer will eventually manifest themselves as a single or multiple bit flip(s). When probabilistic error functions for specific fault origins are known at the bit or signal level, knowledge about the unit of design and its environment allows the transformation of the bit-related error functions into characteristic higher layer representations, such as error functions for data words, Finite State Machine (FSM) state, macro-interfaces or software variables. Thus, design concerns at higher abstraction layers can be investigated without the necessity to further consider the full details of lower levels of design. This paper introduces the ideas of RAP based on examples of radiation-induced soft errors in SRAM cells, voltage variations and sequential CMOS logic. It shows by example how probabilistic bit flips are systematically abstracted and propagated towards higher abstraction levels up to the application software layer, and how RAP can be used to parameterize architecture-level resilience methods.},
  doi = {http://dx.doi.org/10.1016/j.microrel.2013.12.012},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2014/Elsevier_HerkeAEGGHKKKMNRRSSTTWWW2014.pdf}
}
215. Diagnosis of Multiple Faults with Highly Compacted Test Responses
Cook, A. and Wunderlich, H.-J.
Proceedings of the 19th IEEE European Test Symposium (ETS'14), Paderborn, Germany, 26-30 May 2014, pp. 27-30
2014
DOI PDF 
Keywords: Multiple Faults, Diagnosis, Response Compaction
Abstract: Defects cluster, and the probability of a multiple fault is significantly higher than just the product of the single fault probabilities. While this observation is beneficial for high yield, it complicates fault diagnosis. Multiple faults will occur especially often during process learning, yield ramp-up and field return analysis.
In this paper, a logic diagnosis algorithm is presented which is robust against multiple faults and which is able to diagnose multiple faults with high accuracy even on compressed test responses as they are produced in embedded test and built-in self-test. The developed solution takes advantage of the linear properties of a MISR compactor to identify a set of faults likely to produce the observed faulty signatures. Experimental results show an improvement in accuracy of up to 22 % over traditional logic diagnosis solutions suitable for comparable compaction ratios.
BibTeX:
@inproceedings{CookW2014,
  author = {Cook, Alejandro and Wunderlich, Hans-Joachim},
  title = {{Diagnosis of Multiple Faults with Highly Compacted Test Responses}},
  booktitle = {Proceedings of the 19th IEEE European Test Symposium (ETS'14)},
  year = {2014},
  pages = { 27--30 },
  keywords = {Multiple Faults, Diagnosis, Response Compaction},
  abstract = {Defects cluster, and the probability of a multiple fault is significantly higher than just the product of the single fault probabilities. While this observation is beneficial for high yield, it complicates fault diagnosis. Multiple faults will occur especially often during process learning, yield ramp-up and field return analysis.
In this paper, a logic diagnosis algorithm is presented which is robust against multiple faults and which is able to diagnose multiple faults with high accuracy even on compressed test responses as they are produced in embedded test and built-in self-test. The developed solution takes advantage of the linear properties of a MISR compactor to identify a set of faults likely to produce the observed faulty signatures. Experimental results show an improvement in accuracy of up to 22 % over traditional logic diagnosis solutions suitable for comparable compaction ratios.},
  doi = {http://dx.doi.org/10.1109/ETS.2014.6847796},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2014/ETS_CookW2014.pdf}
}
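The linear property of the MISR compactor exploited above can be illustrated with a small toy model (hypothetical parameters, not the paper's implementation): because the compactor consists only of shift and XOR operations, signatures obey superposition over GF(2), so the signature of a faulty response stream equals the fault-free signature XOR the signature of the error stream alone.

# Toy MISR model (hypothetical width and feedback taps) illustrating linearity over GF(2).
def misr_signature(stream, width=8, taps=(7, 3, 2, 1)):
    state = 0
    for word in stream:                      # 'word' is one parallel response, 'width' bits wide
        fb = 0
        for t in taps:                       # feedback polynomial: XOR of tapped state bits
            fb ^= (state >> t) & 1
        state = ((state << 1) ^ fb) & ((1 << width) - 1)
        state ^= word                        # inject the response bits in parallel
    return state

good  = [0b10100011, 0b01110000, 0b11111111]
error = [0b00000000, 0b00000100, 0b10000000]   # fault-induced bit differences per cycle
faulty = [g ^ e for g, e in zip(good, error)]
# Superposition: signature of faulty responses = good signature XOR error signature.
assert misr_signature(faulty) == misr_signature(good) ^ misr_signature(error)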
214. Variation-Aware Deterministic ATPG
Sauer, M., Polian, I., Imhof, M.E., Mumtaz, A., Schneider, E., Czutro, A., Wunderlich, H.-J. and Becker, B.
Proceedings of the 19th IEEE European Test Symposium (ETS'14), Paderborn, Germany, 26-30 May 2014, pp. 87-92
Best paper award
2014
DOI URL PDF 
Keywords: Variation-aware test, fault efficiency, ATPG
Abstract: In technologies affected by variability, the detection status of a small-delay fault may vary among manufactured circuit instances. The same fault may be detected, missed or provably undetectable in different circuit instances. We introduce the first complete flow to accurately evaluate and systematically maximize the test quality under variability. As the number of possible circuit instances is infinite, we employ statistical analysis to obtain a test set that achieves a fault-efficiency target with a user-defined confidence level. The algorithm combines a classical path-oriented test-generation procedure with a novel waveform-accurate engine that can formally prove that a small-delay fault is not detectable and does not count towards fault efficiency. Extensive simulation results demonstrate the performance of the generated test sets for industrial circuits affected by uncorrelated and correlated variations.
BibTeX:
@inproceedings{SauerPIMSCWB2014,
  author = {Sauer, Matthias and Polian, Ilia and Imhof, Michael E. and Mumtaz, Abdullah and Schneider, Eric and Czutro, Alexander and Wunderlich, Hans-Joachim and Becker, Bernd},
  title = {{Variation-Aware Deterministic ATPG}},
  booktitle = {Proceedings of the 19th IEEE European Test Symposium (ETS'14)},
  year = {2014},
  pages = {87--92},
  keywords = {Variation-aware test, fault efficiency, ATPG},
  abstract = {In technologies affected by variability, the detection status of a small-delay fault may vary among manufactured circuit instances. The same fault may be detected, missed or provably undetectable in different circuit instances. We introduce the first complete flow to accurately evaluate and systematically maximize the test quality under variability. As the number of possible circuit instances is infinite, we employ statistical analysis to obtain a test set that achieves a fault-efficiency target with a user-defined confidence level. The algorithm combines a classical path-oriented test-generation procedure with a novel waveform-accurate engine that can formally prove that a small-delay fault is not detectable and does not count towards fault efficiency. Extensive simulation results demonstrate the performance of the generated test sets for industrial circuits affected by uncorrelated and correlated variations.},
  url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=6847806},
  doi = {http://dx.doi.org/10.1109/ETS.2014.6847806},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2014/ETS_SauerPIMSCWB2014.pdf}
}
213. Incremental Computation of Delay Fault Detection Probability for Variation-Aware Test Generation
Wagner, M. and Wunderlich, H.-J.
Proceedings of the 19th IEEE European Test Symposium (ETS'14), Paderborn, Germany, 26-30 May 2014, pp. 81-86
2014
DOI PDF 
Keywords: delay test, process variations, delay test quality
Abstract: Large process variations in recent technology nodes present a major challenge for the timing analysis of digital integrated circuits. The optimization decisions of a statistical delay test generation method must therefore rely on the probability of detecting a target delay fault with the currently chosen test vector pairs. However, the huge number of probability evaluations in practical applications creates a large computational overhead.
To address this issue, this paper presents the first incremental delay fault detection probability computation algorithm in the literature, which is suitable for the inner loop of automatic test pattern generation methods. Compared to Monte Carlo simulations of NXP benchmark circuits, the new method consistently shows a very large speedup and only a small approximation error.
BibTeX:
@inproceedings{WagneW2014,
  author = {Wagner, Marcus and Wunderlich, Hans-Joachim},
  title = {{Incremental Computation of Delay Fault Detection Probability for Variation-Aware Test Generation}},
  booktitle = {Proceedings of the 19th IEEE European Test Symposium (ETS'14)},
  year = {2014},
  pages = { 81--86 },
  keywords = {delay test, process variations, delay test quality},
  abstract = {Large process variations in recent technology nodes present a major challenge for the timing analysis of digital integrated circuits. The optimization decisions of a statistical delay test generation method must therefore rely on the probability of detecting a target delay fault with the currently chosen test vector pairs. However, the huge number of probability evaluations in practical applications creates a large computational overhead.
To address this issue, this paper presents the first incremental delay fault detection probability computation algorithm in the literature, which is suitable for the inner loop of automatic test pattern generation methods. Compared to Monte Carlo simulations of NXP benchmark circuits, the new method consistently shows a very large speedup and only a small approximation error.},
  doi = {http://dx.doi.org/10.1109/ETS.2014.6847805},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2014/ETS_WagneW2014.pdf}
}
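For readers who want a feeling for the Monte Carlo baseline mentioned in the abstract, the following toy estimator (illustrative assumptions only: a single sensitized path with independent Gaussian gate delays) approximates the probability that a small-delay fault of a given size is detected, i.e. that the accumulated path delay plus the extra fault delay exceeds the observation interval.

# Toy Monte Carlo estimate of a delay-fault detection probability (illustrative only).
import random

def detection_probability(nominal_delays, sigma, fault_size, clock_period, samples=100000):
    detected = 0
    for _ in range(samples):
        # Sample each gate delay on the sensitized path independently (assumed Gaussian).
        path_delay = sum(random.gauss(d, sigma) for d in nominal_delays)
        if path_delay + fault_size > clock_period:
            detected += 1                    # fault effect arrives too late -> detected
    return detected / samples

# Example: 10 gates of 100 ps nominal delay, 10 ps sigma, 50 ps fault, 1.1 ns observation time.
print(detection_probability([100.0] * 10, 10.0, 50.0, 1100.0))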
212. Structural Software-Based Self-Test of Network-on-Chip
Dalirsani, A., Imhof, M.E. and Wunderlich, H.-J.
Proceedings of the 32nd IEEE VLSI Test Symposium (VTS'14), Napa, California, USA, 13-17 April 2014
2014
DOI URL PDF 
Keywords: Network-on-Chip (NoC), Software-Based Self-Test (SBST), Automatic Test Pattern Generation (ATPG), Boolean Satisfiability (SAT)
Abstract: Software-Based Self-Test (SBST) is extended to the switches of complex Network-on-Chips (NoC). Test patterns for structural faults are turned into valid packets by using satisfiability (SAT) solvers. The test technique provides a high fault coverage for both manufacturing test and online test.
BibTeX:
@inproceedings{DalirIW2014,
  author = {Dalirsani, Atefe and Imhof, Michael E. and Wunderlich, Hans-Joachim},
  title = {{Structural Software-Based Self-Test of Network-on-Chip}},
  booktitle = {Proceedings of the 32nd IEEE VLSI Test Symposium (VTS'14)},
  year = {2014},
  keywords = {Network-on-Chip (NoC), Software-Based Self-Test (SBST), Automatic Test Pattern Generation (ATPG), Boolean Satisfiability (SAT)},
  abstract = {Software-Based Self-Test (SBST) is extended to the switches of complex Network-on-Chips (NoC). Test patterns for structural faults are turned into valid packets by using satisfiability (SAT) solvers. The test technique provides a high fault coverage for both manufacturing test and online test.},
  url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=6818754},
  doi = {http://dx.doi.org/10.1109/VTS.2014.6818754},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2014/VTS_DalirIW2014.pdf}
}
211. Non-Intrusive Integration of Advanced Diagnosis Features in Automotive E/E-Architectures
Abelein, U., Cook, A., Engelke, P., Glaß, M., Reimann, F., Rodríguez Gómez, L., Russ, T., Teich, J., Ull, D. and Wunderlich, H.-J.
Proceedings of the Design, Automation and Test in Europe (DATE'14), Dresden, Germany, 24-28 March 2014
2014
DOI URL PDF 
Keywords: Automotive Structural Diagnosis, BIST
Abstract: With ever more complex automotive systems, the current approach of using functional tests to locate faulty components results in very long analysis procedures and poor diagnostic accuracy. Built-In Self-Test (BIST) offers a promising alternative to collect structural diagnostic information during E/E-architecture test. However, as the automotive industry is quite cost-driven, structural diagnosis shall not deteriorate traditional design objectives. With this goal in mind, the work at hand proposes a design space exploration to integrate structural diagnostic capabilities into an E/E-architecture design. The proposed integration is performed non-intrusively, i. e., the addition and execution of tests (a) does not affect any functional applications and (b) does not require any costly changes in the communication schedules.
BibTeX:
@inproceedings{AbeleCEGRRRTUW2014,
  author = {Abelein, Ulrich and Cook, Alejandro and Engelke, Piet and Glaß, Michael and Reimann, Felix and Rodríguez Gómez, Laura and Russ, Thomas and Teich, Jürgen and Ull, Dominik and Wunderlich, Hans-Joachim},
  title = {{Non-Intrusive Integration of Advanced Diagnosis Features in Automotive E/E-Architectures}},
  booktitle = {Proceedings of the Design, Automation and Test in Europe (DATE'14)},
  year = {2014},
  keywords = {Automotive Structural Diagnosis, BIST},
  abstract = {With ever more complex automotive systems, the current approach of using functional tests to locate faulty components results in very long analysis procedures and poor diagnostic accuracy. Built-In Self-Test (BIST) offers a promising alternative to collect structural diagnostic information during E/E-architecture test. However, as the automotive industry is quite cost-driven, structural diagnosis shall not deteriorate traditional design objectives. With this goal in mind, the work at hand proposes a design space exploration to integrate structural diagnostic capabilities into an E/E-architecture design. The proposed integration is performed non-intrusively, i. e., the addition and execution of tests (a) does not affect any functional applications and (b) does not require any costly changes in the communication schedules.},
  url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=6800574},
  doi = {http://dx.doi.org/10.7873/DATE.2014.373},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2014/DATE_AbeleCEGRRRTUW2014.pdf}
}
210. Bit-Flipping Scan - A Unified Architecture for Fault Tolerance and Offline Test
Imhof, M.E. and Wunderlich, H.-J.
Proceedings of the Design, Automation and Test in Europe (DATE'14), Dresden, Germany, 24-28 March 2014
2014
DOI URL PDF 
Keywords: Bit-Flipping Scan, Fault Tolerance, Test, Compaction, ATPG, Satisfiability
Abstract: Test is an essential task since the early days of digital circuits. Every produced chip undergoes at least a production test supported by on-chip test infrastructure to reduce test cost. Throughout the technology evolution fault tolerance gained importance and is now necessary in many applications to mitigate soft errors threatening consistent operation. While a variety of effective solutions exists to tackle both areas, test and fault tolerance are often implemented orthogonally, and hence do not exploit the potential synergies of a combined solution.
The unified architecture presented here facilitates fault tolerance and test by combining a checksum of the sequential state with the ability to flip arbitrary bits. Experimental results confirm a reduced area overhead compared to an orthogonal combination of classical test and fault tolerance schemes. In combination with heuristically generated test sequences, the test application time and test data volume are reduced significantly.
BibTeX:
@inproceedings{ImhofW2014,
  author = {Imhof, Michael E. and Wunderlich, Hans-Joachim},
  title = {{Bit-Flipping Scan - A Unified Architecture for Fault Tolerance and Offline Test}},
  booktitle = {Proceedings of the Design, Automation and Test in Europe (DATE'14)},
  year = {2014},
  keywords = {Bit-Flipping Scan, Fault Tolerance, Test, Compaction, ATPG, Satisfiability},
  abstract = {Test is an essential task since the early days of digital circuits. Every produced chip undergoes at least a production test supported by on-chip test infrastructure to reduce test cost. Throughout the technology evolution fault tolerance gained importance and is now necessary in many applications to mitigate soft errors threatening consistent operation. While a variety of effective solutions exists to tackle both areas, test and fault tolerance are often implemented orthogonally, and hence do not exploit the potential synergies of a combined solution.
The unified architecture presented here facilitates fault tolerance and test by combining a checksum of the sequential state with the ability to flip arbitrary bits. Experimental results
confirm a reduced area overhead compared to a orthogonal combination of classical test and fault tolerance schemes. In combination with heuristically generated test sequences the test
application time and test data volume are reduced significantly.}, url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=6800407}, doi = {http://dx.doi.org/10.7873/DATE.2014.206}, file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2014/DATE_ImhofW2014.pdf} }
209. Verifikation Rekonfigurierbarer Scan-Netze
Baranowski, R., Kochte, M.A. and Wunderlich, H.-J.
Proceedings of the 17. Workshop Methoden und Beschreibungssprachen zur Modellierung und Verifikation von Schaltungen und Systemen (MBMV'14), Böblingen, Germany, 10-12 March 2014, pp. 137-146
2014
URL PDF 
Keywords: Verification, debug and diagnosis, reconfigurable scan network, IJTAG, IEEE P1687, design for test
Abstract: Reconfigurable scan networks, e.g. according to IEEE Std. P1687 or 1149.1-2013, enable efficient access to on-chip infrastructure for bring-up, debug, post-silicon validation, and diagnosis. These scan networks are often hierarchical and may exhibit complex structural and functional dependencies. Known methods for the verification of scan chains, based on simulation and structural analysis, are not suited to verify correctness properties of complex scan networks. This work presents a formal model for reconfigurable scan networks that captures the structural and functional dependencies and is applicable to architectures following IEEE P1687. The model serves as the basis for efficient bounded model checking of properties such as the reachability of scan registers.
BibTeX:
@inproceedings{BaranKW2014,
  author = {Baranowski, Rafal and Kochte, Michael A. and Wunderlich, Hans-Joachim},
  title = {{Verifikation Rekonfigurierbarer Scan-Netze}},
  booktitle = {Proceedings of the 17. Workshop Methoden und Beschreibungssprachen zur Modellierung und Verifikation von Schaltungen und Systemen (MBMV'14)},
  year = {2014},
  pages = {137--146},
  keywords = {Verification, debug and diagnosis, reconfigurable scan network, IJTAG, IEEE P1687, design for test},
  abstract = {Rekonfigurierbare Scan-Netze, z. B. entsprechend IEEE Std. P1687 oder 1149.1-2013, ermöglichen den effizienten Zugriff auf On-Chip-Infrastruktur für Bringup, Debug, Post-Silicon-Validierung und Diagnose. Diese Scan-Netze sind oft hierarchisch und können komplexe strukturelle und funktionale Abhängigkeiten aufweisen. Bekannte Verfahren zur Verifikation von Scan-Ketten, basierend auf Simulation und struktureller Analyse, sind nicht geeignet, Korrektheitseigenschaften von komplexen Scan-Netzen zu verifizieren. Diese Arbeit stellt ein formales Modell für rekonfigurierbare Scan-Netze vor, welches die strukturellen und funktionalen Abhängigkeiten abbildet und anwendbar ist für Architekturen nach IEEE P1687. Das Modell dient als Grundlage für effizientes Bounded Model Checking von Eigenschaften, wie z. B. der Erreichbarkeit von Scan-Registern.},
  url = {https://cuvillier.de/de/shop/publications/6629-mbmv-2014},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2014/MBMV_BaranKW2014.pdf}
}
208. Securing Access to Reconfigurable Scan Networks
Baranowski, R., Kochte, M.A. and Wunderlich, H.-J.
Proceedings of the 22nd IEEE Asian Test Symposium (ATS'13), Yilan, Taiwan, 18-21 November 2013
2013
DOI PDF 
Keywords: Debug and diagnosis, reconfigurable scan network, IJTAG, IEEE P1687, secure DFT, hardware security
Abstract: The accessibility of on-chip embedded infrastructure for test, reconfiguration, and debug poses a serious safety and security problem. Special care is required in the design and development of scan architectures based on IEEE Std. 1149.1 (JTAG), IEEE Std. 1500, and especially reconfigurable scan networks, as allowed by the upcoming IEEE P1687 (IJTAG). Traditionally, the scan infrastructure is secured after manufacturing test using fuses that disable the test access port (TAP) completely or partially. The fuse-based approach is efficient if some scan chains or instructions of the TAP controller are to be permanently blocked. However, this approach becomes costly if fine-grained access management is required, and it faces scalability issues in reconfigurable scan networks. In this paper, we propose a scalable solution for multi-level access management in reconfigurable scan networks. The access to protected registers is restricted locally at TAP-level by a sequence filter which allows only a precomputed set of scan-in access sequences. Our approach does not require any modification of the scan architecture and causes no access time penalty. Experimental results for complex reconfigurable scan networks show that the area overhead depends primarily on the number of allowed accesses, and is marginal even if this number exceeds the count of network’s registers.
BibTeX:
@inproceedings{BaranKW2013a,
  author = {Baranowski, Rafal and Kochte, Michael A. and Wunderlich, Hans-Joachim},
  title = {{Securing Access to Reconfigurable Scan Networks}},
  booktitle = {Proceedings of the 22nd IEEE Asian Test Symposium (ATS'13)},
  year = {2013},
  keywords = {Debug and diagnosis, reconfigurable scan network, IJTAG, IEEE P1687, secure DFT, hardware security},
  abstract = {The accessibility of on-chip embedded infrastructure for test, reconfiguration, and debug poses a serious safety and security problem. Special care is required in the design and development of scan architectures based on IEEE Std. 1149.1 (JTAG), IEEE Std. 1500, and especially reconfigurable scan networks, as allowed by the upcoming IEEE P1687 (IJTAG). Traditionally, the scan infrastructure is secured after manufacturing test using fuses that disable the test access port (TAP) completely or partially. The fuse-based approach is efficient if some scan chains or instructions of the TAP controller are to be permanently blocked. However, this approach becomes costly if fine-grained access management is required, and it faces scalability issues in reconfigurable scan networks. In this paper, we propose a scalable solution for multi-level access management in reconfigurable scan networks. The access to protected registers is restricted locally at TAP-level by a sequence filter which allows only a precomputed set of scan-in access sequences. Our approach does not require any modification of the scan architecture and causes no access time penalty. Experimental results for complex reconfigurable scan networks show that the area overhead depends primarily on the number of allowed accesses, and is marginal even if this number exceeds the count of network’s registers.},
  doi = {http://dx.doi.org/10.1109/ATS.2013.61},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2013/ATS_BaranKW2013.pdf}
}
207. Accurate Multi-Cycle ATPG in Presence of X-Values
Erb, D., Kochte, M.A., Sauer, M., Wunderlich, H.-J. and Becker, B.
Proceedings of the 22nd IEEE Asian Test Symposium (ATS'13), Yilan, Taiwan, 18-21 November 2013
2013
DOI PDF 
Keywords: Unknown values, test generation, ATPG, QBF, multi-cycle, partial scan
Abstract: Unknown (X) values in a circuit impair test quality and increase test costs. Classical n-valued algorithms for fault simulation and ATPG, which typically use a three- or four-valued logic for the good and faulty circuit, are in principle pessimistic in presence of X-values and cannot accurately compute the achievable fault coverage.
In partial scan or pipelined circuits, X-values originate in non-scan flip-flops. These circuits are tested using multi-cycle tests. Here we present multi-cycle test generation techniques for circuits with X-values due to partial scan or other X-sources. The proposed techniques have been integrated into a multi-cycle ATPG framework which employs formal Boolean and quantified Boolean (QBF) satisfiability techniques to compute the possible signal states in the circuit accurately. Efficient encoding of the problem instance ensures reasonable runtimes.
We show that in presence of X-values, the detection of stuck-at faults requires not only exact formal reasoning in a single cycle, but especially the consideration of multiple cycles for excitation of the fault site as well as propagation and controlled reconvergence of fault effects.
For the first time, accurate deterministic ATPG for multi-cycle test application is supported for stuck-at faults. Experiments on ISCAS'89 and industrial circuits with X-sources show that this new approach increases the fault coverage considerably.
BibTeX:
@inproceedings{ErbKSWB2013,
  author = {Erb, Dominik and Kochte, Michael A. and Sauer, Matthias and Wunderlich, Hans-Joachim and Becker, Bernd},
  title = {{Accurate Multi-Cycle ATPG in Presence of X-Values}},
  booktitle = {Proceedings of the 22nd IEEE Asian Test Symposium (ATS'13)},
  year = {2013},
  keywords = {Unknown values, test generation, ATPG, QBF, multi-cycle, partial scan},
  abstract = { Unknown (X) values in a circuit impair test quality and increase test costs. Classical n-valued algorithms for fault simulation and ATPG, which typically use a three- or four-valued logic for the good and faulty circuit, are in principle pessimistic in presence of X-values and cannot accurately compute the achievable fault coverage.
In partial scan or pipelined circuits, X-values originate in non-scan flip-flops. These circuits are tested using multi-cycle tests. Here we present multi-cycle test generation techniques for circuits with X-values due to partial scan or other X-sources. The proposed techniques have been integrated into a multi-cycle ATPG framework which employs formal Boolean and quantified Boolean (QBF) satisfiability techniques to compute the possible signal states in the circuit accurately. Efficient encoding of the problem instance ensures reasonable runtimes.
We show that in presence of X-values, the detection of stuck-at faults requires not only exact formal reasoning in a single cycle, but especially the consideration of multiple cycles for excitation of the fault site as well as propagation and controlled reconvergence of fault effects.
For the first time, accurate deterministic ATPG for multi-cycle test application is supported for stuck-at faults. Experiments on ISCAS'89 and industrial circuits with X-sources show that this new approach increases the fault coverage considerably.},
  doi = {http://dx.doi.org/10.1109/ATS.2013.53},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2013/ATS_ErbKSWB2013.pdf}
}
206. Synthesis of Workload Monitors for On-Line Stress Prediction
Baranowski, R., Cook, A., Imhof, M.E., Liu, C. and Wunderlich, H.-J.
Proceedings of the 16th IEEE Symp. Defect and Fault Tolerance in VLSI and Nanotechnology Systems (DFT'13), New York City, New York, USA, 2-4 October 2013, pp. 137-142
2013
DOI URL PDF 
Keywords: Reliability estimation, workload monitoring, aging prediction, NBTI
Abstract: Stringent reliability requirements call for monitoring mechanisms to account for circuit degradation throughout the complete system lifetime. In this work, we efficiently monitor the stress experienced by the system as a result of its current workload. To achieve this goal, we construct workload monitors that observe the most relevant subset of the circuit’s primary and pseudo-primary inputs and produce an accurate stress approximation. The proposed approach enables the timely adoption of suitable countermeasures to reduce or prevent any deviation from the intended circuit behavior. The relation between monitoring accuracy and hardware cost can be adjusted according to design requirements. Experimental results show the efficiency of the proposed approach for the prediction of stress induced by Negative Bias Temperature Instability (NBTI) in critical and near-critical paths of a digital circuit.
BibTeX:
@inproceedings{BaranCILW2013,
  author = {Baranowski, Rafal and Cook, Alejandro and Imhof, Michael E. and Liu, Chang and Wunderlich, Hans-Joachim},
  title = {{Synthesis of Workload Monitors for On-Line Stress Prediction}},
  booktitle = {Proceedings of the 16th IEEE Symp. Defect and Fault Tolerance in VLSI and Nanotechnology Systems (DFT'13)},
  year = {2013},
  pages = {137--142},
  keywords = {Reliability estimation, workload monitoring, aging prediction, NBTI},
  abstract = {Stringent reliability requirements call for monitoring mechanisms to account for circuit degradation throughout the complete system lifetime. In this work, we efficiently monitor the stress experienced by the system as a result of its current workload. To achieve this goal, we construct workload monitors that observe the most relevant subset of the circuit’s primary and pseudo-primary inputs and produce an accurate stress approximation. The proposed approach enables the timely adoption of suitable countermeasures to reduce or prevent any deviation from the intended circuit behavior. The relation between monitoring accuracy and hardware cost can be adjusted according to design requirements. Experimental results show the efficiency of the proposed approach for the prediction of stress induced by Negative Bias Temperature Instability (NBTI) in critical and near-critical paths of a digital circuit.},
  url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=6653596},
  doi = {http://dx.doi.org/10.1109/DFT.2013.6653596},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2013/DFTS_BaranCILW2013.pdf}
}
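A much simplified proxy for the workload-dependent stress that such monitors approximate (my own illustrative assumption, not the paper's stress model) is the per-node signal probability over a workload trace, e.g. the fraction of cycles a node spends at logic '1' as a crude indicator of NBTI stress.

# Illustrative only: per-node '1'-duty cycle over a workload trace as a crude NBTI stress proxy.
def duty_cycles(trace):
    """trace: list of dicts mapping node name -> 0/1 value, one dict per clock cycle."""
    counts = {}
    for cycle in trace:
        for node, value in cycle.items():
            counts[node] = counts.get(node, 0) + value
    return {node: ones / len(trace) for node, ones in counts.items()}

trace = [{'n1': 1, 'n2': 0}, {'n1': 1, 'n2': 1}, {'n1': 0, 'n2': 1}, {'n1': 1, 'n2': 1}]
print(duty_cycles(trace))   # {'n1': 0.75, 'n2': 0.75}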
205. SAT-based Code Synthesis for Fault-Secure Circuits
Dalirsani, A., Kochte, M.A. and Wunderlich, H.-J.
Proceedings of the 16th IEEE Symp. Defect and Fault Tolerance in VLSI and Nanotechnology Systems (DFT'13), New York City, NY, USA, 2-4 October 2013, pp. 38-44
2013
DOI URL PDF 
Keywords: Concurrent error detection (CED), error control coding, self-checking circuit, totally self-checking (TSC)
Abstract: This paper presents a novel method for synthesizing fault-secure circuits based on parity codes over groups of circuit outputs. The fault-secure circuit is able to detect all errors resulting from combinational and transition faults at a single node. The original circuit is not modified. If the original circuit is non-redundant, the result is a totally self-checking circuit. At first, the method creates the minimum number of parity groups such that the effect of each fault is not masked in at least one parity group. To ensure fault-secureness, the obtained groups are split such that no fault leads to silent data corruption. This is performed by a formal Boolean satisfiability (SAT) based analysis. Since the proposed method reduces the number of required parity groups, the number of two-rail checkers and the complexity of the prediction logic required for fault-secureness decreases as well. Experimental results show that the area overhead is much less compared to duplication and less in comparison to previous methods for synthesis of totally self-checking circuits. Since the original circuit is not modified, the method can be applied for fixed hard macros and IP cores.
BibTeX:
@inproceedings{DalirKW2013,
  author = {Dalirsani, Atefe and Kochte, Michael A. and Wunderlich, Hans-Joachim},
  title = {{SAT-based Code Synthesis for Fault-Secure Circuits}},
  booktitle = {Proceedings of the 16th IEEE Symp. Defect and Fault Tolerance in VLSI and Nanotechnology Systems (DFT'13)},
  year = {2013},
  pages = {38--44},
  keywords = {Concurrent error detection (CED), error control coding, self-checking circuit, totally self-checking (TSC)},
  abstract = {This paper presents a novel method for synthesizing fault-secure circuits based on parity codes over groups of circuit outputs. The fault-secure circuit is able to detect all errors resulting from combinational and transition faults at a single node. The original circuit is not modified. If the original circuit is non-redundant, the result is a totally self-checking circuit. At first, the method creates the minimum number of parity groups such that the effect of each fault is not masked in at least one parity group. To ensure fault-secureness, the obtained groups are split such that no fault leads to silent data corruption. This is performed by a formal Boolean satisfiability (SAT) based analysis. Since the proposed method reduces the number of required parity groups, the number of two-rail checkers and the complexity of the prediction logic required for fault-secureness decreases as well. Experimental results show that the area overhead is much less compared to duplication and less in comparison to previous methods for synthesis of totally self-checking circuits. Since the original circuit is not modified, the method can be applied for fixed hard macros and IP cores.},
  url = {http://ieeexplore.ieee.org/xpl/articleDetails.jsp?tp=&arnumber=6653580},
  doi = {http://dx.doi.org/10.1109/DFT.2013.6653580},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2013/DFTS_DalirKW2013.pdf}
}
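The detection condition behind the parity groups described above can be illustrated with a small sketch (hypothetical groups, not the paper's synthesis procedure): an erroneous output pattern is detected if and only if at least one parity group contains an odd number of flipped outputs, which is why groups must be split so that no single fault can flip an even number of outputs in every group.

# Illustrative only: an output error is detected iff some parity group sees an odd number of flips.
def detected(flipped_outputs, parity_groups):
    return any(len(set(flipped_outputs) & set(group)) % 2 == 1 for group in parity_groups)

groups = [{0, 1, 2}, {2, 3}]          # hypothetical parity groups over four outputs
print(detected({1}, groups))          # True: a single output flip is always caught
print(detected({0, 1}, groups))       # False: an even flip count inside every group would escape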
204. Module Diversification: Fault Tolerance and Aging Mitigation for Runtime Reconfigurable Architectures
Zhang, H., Bauer, L., Kochte, M.A., Schneider, E., Braun, C., Imhof, M.E., Wunderlich, H.-J. and Henkel, J.
Proceedings of the IEEE International Test Conference (ITC'13), Anaheim, California, USA, 10-12 September 2013
2013
DOI URL PDF 
Keywords: Reliability, online test, fault-tolerance, aging mitigation, partial runtime reconfiguration, FPGA
Abstract: Runtime reconfigurable architectures based on Field-Programmable Gate Arrays (FPGAs) are attractive for realizing complex applications. However, being manufactured in latest semiconductor process technologies, FPGAs are increasingly prone to aging effects, which reduce the reliability of such systems and must be tackled by aging mitigation and application of fault tolerance techniques. This paper presents module diversification, a novel design method that creates different configurations for runtime reconfigurable modules. Our method provides fault tolerance by creating the minimal number of configurations such that for any faulty Configurable Logic Block (CLB) there is at least one configuration that does not use that CLB. Additionally, we determine the fraction of time that each configuration should be used to balance the stress and to mitigate the aging process in FPGA-based runtime reconfigurable systems. The generated configurations significantly improve reliability by fault-tolerance and aging mitigation.
BibTeX:
@inproceedings{ZhangBKSBIWH2013,
  author = {Zhang, Hongyan and Bauer, Lars and Kochte, Michael A. and Schneider, Eric and Braun, Claus and Imhof, Michael E. and Wunderlich, Hans-Joachim and Henkel, Jörg},
  title = {{Module Diversification: Fault Tolerance and Aging Mitigation for Runtime Reconfigurable Architectures}},
  booktitle = {Proceedings of the IEEE International Test Conference (ITC'13)},
  year = {2013},
  keywords = {Reliability, online test, fault-tolerance, aging mitigation, partial runtime reconfiguration, FPGA},
  abstract = {Runtime reconfigurable architectures based on Field-Programmable Gate Arrays (FPGAs) are attractive for realizing complex applications. However, being manufactured in latest semiconductor process technologies, FPGAs are increasingly prone to aging effects, which reduce the reliability of such systems and must be tackled by aging mitigation and application of fault tolerance techniques. This paper presents module diversification, a novel design method that creates different configurations for runtime reconfigurable modules. Our method provides fault tolerance by creating the minimal number of configurations such that for any faulty Configurable Logic Block (CLB) there is at least one configuration that does not use that CLB. Additionally, we determine the fraction of time that each configuration should be used to balance the stress and to mitigate the aging process in FPGA-based runtime reconfigurable systems. The generated configurations significantly improve reliability by fault-tolerance and aging mitigation.},
  url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=6651926},
  doi = {http://dx.doi.org/10.1109/TEST.2013.6651926},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2013/ITC_ZhangBKSBIWH2013.pdf}
}
203. Test Strategies for Reliable Runtime Reconfigurable Architectures
Bauer, L., Braun, C., Imhof, M.E., Kochte, M.A., Schneider, E., Zhang, H., Henkel, J. and Wunderlich, H.-J.
IEEE Transactions on Computers
Vol. 62(8), Los Alamitos, California, USA, August 2013, pp. 1494-1507
2013
DOI URL PDF 
Keywords: FPGA, Reconfigurable Architectures, Online Test
Abstract: FPGA-based reconfigurable systems allow the online adaptation to dynamically changing runtime requirements. The reliability of FPGAs, being manufactured in latest technologies, is threatened by soft errors, as well as aging effects and latent defects. To ensure reliable reconfiguration, it is mandatory to guarantee the correct operation of the reconfigurable fabric. This can be achieved by periodic or on-demand online testing. This paper presents a reliable system architecture for runtime-reconfigurable systems, which integrates two non-concurrent online test strategies: Pre-configuration online tests (PRET) and post-configuration online tests (PORT). The PRET checks that the reconfigurable hardware is free of faults by periodic or on-demand tests. The PORT has two objectives: It tests reconfigured hardware units after reconfiguration to check that the configuration process completed correctly and it validates the expected functionality. During operation, PORT is used to periodically check the reconfigured hardware units for malfunctions in the programmable logic. Altogether, this paper presents PRET, PORT, and the system integration of such test schemes into a runtime-reconfigurable system, including the resource management and test scheduling. Experimental results show that the integration of online testing in reconfigurable systems incurs only minimum impact on performance while delivering high fault coverage and low test latency.
BibTeX:
@article{BauerBIKSZHW2013,
  author = {Bauer, Lars and Braun, Claus and Imhof, Michael E. and Kochte, Michael A. and Schneider, Eric and Zhang, Hongyan and Henkel, Jörg and Wunderlich, Hans-Joachim},
  title = {{Test Strategies for Reliable Runtime Reconfigurable Architectures}},
  journal = {IEEE Transactions on Computers},
  publisher = {IEEE Computer Society},
  year = {2013},
  volume = {62},
  number = {8},
  pages = {1494--1507},
  keywords = {FPGA, Reconfigurable Architectures, Online Test},
  abstract = {FPGA-based reconfigurable systems allow the online adaptation to dynamically changing runtime requirements. The reliability of FPGAs, being manufactured in latest technologies, is threatened by soft errors, as well as aging effects and latent defects. To ensure reliable reconfiguration, it is mandatory to guarantee the correct operation of the reconfigurable fabric. This can be achieved by periodic or on-demand online testing. This paper presents a reliable system architecture for runtime-reconfigurable systems, which integrates two non-concurrent online test strategies: Pre-configuration online tests (PRET) and post-configuration online tests (PORT). The PRET checks that the reconfigurable hardware is free of faults by periodic or on-demand tests. The PORT has two objectives: It tests reconfigured hardware units after reconfiguration to check that the configuration process completed correctly and it validates the expected functionality. During operation, PORT is used to periodically check the reconfigured hardware units for malfunctions in the programmable logic. Altogether, this paper presents PRET, PORT, and the system integration of such test schemes into a runtime-reconfigurable system, including the resource management and test scheduling. Experimental results show that the integration of online testing in reconfigurable systems incurs only minimum impact on performance while delivering high fault coverage and low test latency.},
  url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=6475939},
  doi = {http://dx.doi.org/10.1109/TC.2013.53},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2013/TC_BauerBIKSZHW2013.pdf}
}
202. Efficacy and Efficiency of Algorithm-Based Fault Tolerance on GPUs
Wunderlich, H.-J., Braun, C. and Halder, S.
Proceedings of the IEEE International On-Line Testing Symposium (IOLTS'13), Crete, Greece, 8-10 July 2013, pp. 240-243
2013
DOI PDF 
Keywords: Scientific Computing, GPGPU, Soft Errors, Fault Simulation, Algorithm-based Fault Tolerance
Abstract: Computer simulations drive innovations in science and industry, and they are gaining more and more importance. However, their high computational demand generates extraordinary challenges for computing systems. Typical high-performance computing systems, which provide sufficient performance and high reliability, are extremely expensive.
Modern GPUs offer high performance at very low costs, and they enable simulation applications on the desktop. However, they are increasingly prone to transient effects and other reliability threats. To fulfill the strict reliability requirements in scientific computing and simulation technology, appropriate fault tolerance measures have to be integrated into simulation applications for GPUs. Algorithm-Based Fault Tolerance on GPUs has the potential to meet these requirements.
In this work we investigate the efficiency and the efficacy of ABFT for matrix operations on GPUs. We compare ABFT against fault tolerance schemes that are based on redundant computations and we evaluate its error detection capabilities.
BibTeX:
@inproceedings{WundeBH2013,
  author = {Wunderlich, Hans-Joachim and Braun, Claus and Halder, Sebastian},
  title = {{Efficacy and Efficiency of Algorithm-Based Fault Tolerance on GPUs}},
  booktitle = {Proceedings of the IEEE International On-Line Testing Symposium (IOLTS'13)},
  year = {2013},
  pages = {240--243},
  keywords = {Scientific Computing, GPGPU, Soft Errors, Fault Simulation, Algorithm-based Fault Tolerance},
  abstract = {Computer simulations drive innovations in science and industry, and they are gaining more and more importance. However, their high computational demand generates extraordinary challenges for computing systems. Typical high-performance computing systems, which provide sufficient performance and high reliability, are extremely expensive.
Modern GPUs offer high performance at very low costs, and they enable simulation applications on the desktop. However, they are increasingly prone to transient effects and other reliability threats. To fulfill the strict reliability requirements in scientific computing and simulation technology, appropriate fault tolerance measures have to be integrated into simulation applications for GPUs. Algorithm-Based Fault Tolerance on GPUs has the potential to meet these requirements.
In this work we investigate the efficiency and the efficacy of ABFT for matrix operations on GPUs. We compare ABFT against fault tolerance schemes that are based on redundant computations and we evaluate its error detection capabilities.},
  doi = {http://dx.doi.org/10.1109/IOLTS.2013.6604090},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2013/IOLTS_WundeBH2013.pdf}
}
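For readers unfamiliar with ABFT for matrix operations, the classic checksum encoding in the style of Huang and Abraham can be sketched as follows (a minimal CPU-side illustration, not the paper's GPU implementation): a column-sum row is appended to one operand and a row-sum column to the other, so the product carries checksums against which the computed result can be validated.

# Minimal ABFT sketch for matrix multiplication (checksum encoding, CPU-side, illustrative).
import numpy as np

def abft_matmul(A, B, tol=1e-6):
    Ac = np.vstack([A, A.sum(axis=0)])                   # column-checksum matrix of A
    Br = np.hstack([B, B.sum(axis=1, keepdims=True)])    # row-checksum matrix of B
    Cf = Ac @ Br                                         # full-checksum product
    C = Cf[:-1, :-1]
    row_ok = np.allclose(Cf[:-1, -1], C.sum(axis=1), atol=tol)
    col_ok = np.allclose(Cf[-1, :-1], C.sum(axis=0), atol=tol)
    return C, row_ok and col_ok

A = np.random.rand(4, 3)
B = np.random.rand(3, 5)
C, ok = abft_matmul(A, B)
print(ok, np.allclose(C, A @ B))   # True True when no error is injected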
201. Scan Pattern Retargeting and Merging with Reduced Access Time
Baranowski, R., Kochte, M.A. and Wunderlich, H.-J.
Proceedings of the IEEE European Test Symposium (ETS'13), Avignon, France, 27-30 May 2013, pp. 39-45
2013
DOI PDF 
Keywords: Design for debug & diagnosis, optimal pattern retargeting, scan pattern generation, reconfigurable scan network, IJTAG, P1687
Abstract: Efficient access to on-chip instrumentation is a key enabler for post-silicon validation, debug, bringup or diagnosis. Reconfigurable scan networks, as proposed by e.g. the IEEE Std. P1687, emerge as an effective and affordable means to cope with the increasing complexity of on-chip infrastructure. To access an element in a reconfigurable scan network, a scan-in bit sequence must be generated according to the current state and structure of the network. Due to sequential and combinational dependencies, the scan pattern generation process (pattern retargeting) poses a complex decision and optimization problem. This work presents a method for scan pattern generation with reduced access time. We map the access time reduction to a pseudo-Boolean optimization problem, which enables the use of efficient solvers to exhaustively explore the search space of valid scan-in sequences. This is the first automated method for efficient pattern retargeting in complex reconfigurable scan architectures such as P1687-based networks. It supports the concurrent access to multiple target scan registers (access merging) and generates reduced (short) scan-in sequences, considering all sequential and combinational dependencies. The proposed method achieves an access time reduction by up to 88x or 2.4x on average w.r.t. unoptimized satisfying solutions.
BibTeX:
@inproceedings{BaranKW2013,
  author = {Baranowski, Rafal and Kochte, Michael A. and Wunderlich, Hans-Joachim},
  title = {{Scan Pattern Retargeting and Merging with Reduced Access Time}},
  booktitle = {Proceedings of the IEEE European Test Symposium (ETS'13)},
  publisher = {IEEE Computer Society},
  year = {2013},
  pages = {39--45},
  keywords = {Design for debug & diagnosis, optimal pattern retargeting, scan pattern generation, reconfigurable scan network, IJTAG, P1687},
  abstract = {Efficient access to on-chip instrumentation is a key enabler for post-silicon validation, debug, bringup or diagnosis. Reconfigurable scan networks, as proposed by e.g. the IEEE Std. P1687, emerge as an effective and affordable means to cope with the increasing complexity of on-chip infrastructure. To access an element in a reconfigurable scan network, a scan-in bit sequence must be generated according to the current state and structure of the network. Due to sequential and combinational dependencies, the scan pattern generation process (pattern retargeting) poses a complex decision and optimization problem. This work presents a method for scan pattern generation with reduced access time. We map the access time reduction to a pseudo-Boolean optimization problem, which enables the use of efficient solvers to exhaustively explore the search space of valid scan-in sequences. This is the first automated method for efficient pattern retargeting in complex reconfigurable scan architectures such as P1687-based networks. It supports the concurrent access to multiple target scan registers (access merging) and generates reduced (short) scan-in sequences, considering all sequential and combinational dependencies. The proposed method achieves an access time reduction by up to 88x or 2.4x on average w.r.t. unoptimized satisfying solutions.},
  doi = {http://dx.doi.org/10.1109/ETS.2013.6569354},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2013/ETS_BaranKW2013.pdf}
}
200. Accurate QBF-based Test Pattern Generation in Presence of Unknown Values
Hillebrecht, S., Kochte, M.A., Erb, D., Wunderlich, H.-J. and Becker, B.
Proceedings of the Conference on Design, Automation and Test in Europe (DATE'13), Grenoble, France, 18-22 March 2013, pp. 436-441
2013
DOI PDF 
Keywords: Unknown values, test generation, ATPG, QBF
Abstract: Unknown (X) values may emerge during the design process as well as during system operation and test application. Sources of X-values are for example black boxes, clock-domain boundaries, analog-to-digital converters, or uncontrolled or uninitialized sequential elements. To compute a detecting pattern for a given stuck-at fault, well-defined logic values are required both for fault activation as well as for fault effect propagation to observing outputs. In presence of X-values, classical test generation algorithms, based on topological algorithms or formal Boolean satisfiability (SAT) or BDD-based reasoning, may fail to generate testing patterns or to prove faults untestable. This work proposes the first efficient stuck-at fault ATPG algorithm able to prove testability or untestability of faults in presence of X-values. It overcomes the principal inaccuracy and pessimism of classical algorithms when X-values are considered. This accuracy is achieved by mapping the test generation problem to an instance of quantified Boolean formula (QBF) satisfiability. The resulting fault coverage improvement is shown by experimental results on ISCAS benchmark and larger industrial circuits.
BibTeX:
@inproceedings{HilleKEWB2013,
  author = {Hillebrecht, Stefan and Kochte, Michael A. and Erb, Dominik and Wunderlich, Hans-Joachim and Becker, Bernd},
  title = {{Accurate QBF-based Test Pattern Generation in Presence of Unknown Values}},
  booktitle = {Proceedings of the Conference on Design, Automation and Test in Europe (DATE'13)},
  publisher = {IEEE Computer Society},
  year = {2013},
  pages = {436--441},
  keywords = {Unknown values, test generation, ATPG, QBF},
  abstract = {Unknown (X) values may emerge during the design process as well as during system operation and test application. Sources of X-values are for example black boxes, clock-domain boundaries, analog-to-digital converters, or uncontrolled or uninitialized sequential elements. To compute a detecting pattern for a given stuck-at fault, well-defined logic values are required both for fault activation as well as for fault effect propagation to observing outputs. In presence of X-values, classical test generation algorithms, based on topological algorithms or formal Boolean satisfiability (SAT) or BDD-based reasoning, may fail to generate testing patterns or to prove faults untestable. This work proposes the first efficient stuck-at fault ATPG algorithm able to prove testability or untestability of faults in presence of X-values. It overcomes the principal inaccuracy and pessimism of classical algorithms when X-values are considered. This accuracy is achieved by mapping the test generation problem to an instance of quantified Boolean formula (QBF) satisfiability. The resulting fault coverage improvement is shown by experimental results on ISCAS benchmark and larger industrial circuits.},
  doi = {http://dx.doi.org/10.7873/DATE.2013.098},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2013/DATE_HilleKEWB2013.pdf}
}
199. Efficient Variation-Aware Statistical Dynamic Timing Analysis for Delay Test Applications
Wagner, M. and Wunderlich, H.-J.
Proceedings of the Conference on Design, Automation and Test in Europe (DATE'13), Grenoble, France, 18-22 March 2013, pp. 276-281
2013
DOI PDF 
Abstract: Increasing parameter variations, caused by variations in process, temperature, power supply, and wear-out, have emerged as one of the most important challenges in semiconductor manufacturing and test. As a consequence for gate delay testing, a single test vector pair is no longer sufficient to provide the required low test escape probabilities for a single delay fault. Recently proposed statistical test generation methods are therefore guided by a metric, which defines the probability of detecting a delay fault with a given test set. However, since run time and accuracy are dominated by the large number of required metric evaluations, more efficient approximation methods are mandatory for any practical application. In this work, a new statistical dynamic timing analysis algorithm is introduced to tackle this problem. The associated approximation error is very small and predominantly caused by the impact of delay variations on path sensitization and hazards. The experimental results show a large speedup compared to classical Monte Carlo simulations.
BibTeX:
@inproceedings{WagneW2013,
  author = {Wagner, Marcus and Wunderlich, Hans-Joachim},
  title = {{Efficient Variation-Aware Statistical Dynamic Timing Analysis for Delay Test Applications }},
  booktitle = {Proceedings of the Conference on Design, Automation and Test in Europe (DATE'13)},
  year = {2013},
  pages = {276--281},
  abstract = {Increasing parameter variations, caused by variations in process, temperature, power supply, and wear-out, have emerged as one of the most important challenges in semiconductor manufacturing and test. As a consequence for gate delay testing, a single test vector pair is no longer sufficient to provide the required low test escape probabilities for a single delay fault. Recently proposed statistical test generation methods are therefore guided by a metric, which defines the probability of detecting a delay fault with a given test set. However, since run time and accuracy are dominated by the large number of required metric evaluations, more efficient approximation methods are mandatory for any practical application. In this work, a new statistical dynamic timing analysis algorithm is introduced to tackle this problem. The associated approximation error is very small and predominantly caused by the impact of delay variations on path sensitization and hazards. The experimental results show a large speedup compared to classical Monte Carlo simulations.},
  doi = {http://dx.doi.org/10.7873/DATE.2013.069},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2013/DATE_WagneW2013.pdf}
}
198. Accurate X-Propagation for Test Applications by SAT-Based Reasoning
Kochte, M.A., Elm, M. and Wunderlich, H.-J.
IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems (TCAD)
Vol. 31(12), December 2012, pp. 1908-1919
2012
DOI PDF 
Keywords: Unknown values; stuck-at fault coverage; accurate fault simulation; simulation pessimism
Abstract: Unknown or X-values during test application may originate from uncontrolled sequential cells or macros, from clock or A/D boundaries or from tri-state logic. The exact identification of X-value propagation paths in logic circuits is crucial in logic simulation and fault simulation. In the first case, it enables the proper assessment of expected responses and the effective and efficient handling of X-values during test response compaction. In the second case, it is important for a proper assessment of fault coverage of a given test set and consequently influences the efficiency of test pattern generation. The commonly employed n-valued logic simulation evaluates the propagation of X-values only pessimistically, i.e. the X-propagation paths found by n-valued logic simulation are a superset of the actual propagation paths. This paper presents an efficient method to overcome this pessimism and to determine accurately the set of signals which carry an X-value for an input pattern. As examples, it investigates the influence of this pessimism on the two applications X-masking and stuck-at fault coverage assessment. The experimental results on benchmark and industrial circuits assess the pessimism of classic algorithms and show that these algorithms significantly overestimate the signals with X-values. The experiments show that overmasking of test data during test compression can be reduced by an accurate analysis. In stuck-at fault simulation, the coverage of the test set is increased by the proposed algorithm without incurring any overhead.
BibTeX:
@article{KochtEW2012,
  author = {Kochte, Michael A. and Elm, Melanie and Wunderlich, Hans-Joachim},
  title = {{Accurate X-Propagation for Test Applications by SAT-Based Reasoning}},
  journal = {IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems (TCAD)},
  publisher = {IEEE Computer Society},
  year = {2012},
  volume = {31},
  number = {12},
  pages = {1908--1919},
  keywords = {Unknown values; stuck-at fault coverage; accurate fault simulation; simulation pessimism},
  abstract = {Unknown or X-values during test application may originate from uncontrolled sequential cells or macros, from clock or A/D boundaries or from tri-state logic. The exact identification of X-value propagation paths in logic circuits is crucial in logic simulation and fault simulation. In the first case, it enables the proper assessment of expected responses and the effective and efficient handling of X-values during test response compaction. In the second case, it is important for a proper assessment of fault coverage of a given test set and consequently influences the efficiency of test pattern generation. The commonly employed n-valued logic simulation evaluates the propagation of X-values only pessimistically, i.e. the X-propagation paths found by n-valued logic simulation are a superset of the actual propagation paths. This paper presents an efficient method to overcome this pessimism and to determine accurately the set of signals which carry an X-value for an input pattern. As examples, it investigates the influence of this pessimism on the two applications X-masking and stuck-at fault coverage assessment. The experimental results on benchmark and industrial circuits assess the pessimism of classic algorithms and show that these algorithms significantly overestimate the signals with X-values. The experiments show that overmasking of test data during test compression can be reduced by an accurate analysis. In stuck-at fault simulation, the coverage of the test set is increased by the proposed algorithm without incurring any overhead.},
  doi = {http://dx.doi.org/10.1109/TCAD.2012.2210422},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2012/TCAD_KochtEW2012.pdf}
}
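Illustrative sketch (not the paper's SAT formulation): the pessimism of n-valued simulation that the paper removes can already be seen on a one-gate example, where three-valued evaluation of y = a AND (NOT a) with a = X reports X, although case analysis over both values of X proves the output is constantly 0.

# Minimal illustration of 3-valued pessimism vs. exact case analysis over an X source.
X = 'X'

def not3(a):
    return X if a == X else 1 - a

def and3(a, b):
    if a == 0 or b == 0: return 0
    if a == X or b == X: return X
    return 1

def y(a):
    return and3(a, not3(a))

print("3-valued result:", y(X))                    # X (pessimistic)
print("exact result:   ", {y(v) for v in (0, 1)})  # {0} -> provably 0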
197. Reuse of Structural Volume Test Methods for In-System Testing of Automotive ASICs
Cook, A., Ull, D., Elm, M., Wunderlich, H.-J., Randoll, H. and Döhren, S.
Proceedings of the 21st IEEE Asian Test Symposium (ATS'12), Niigata, Japan, 19-22 November 2012, pp. 214-219
2012
DOI PDF 
Keywords: system test, scan-test, in-field, automotive, electronic control unit
Abstract: The automotive industry has to deal with an increasing amount of electronics in today’s vehicles. This paper describes the advantages of structural tests during in-field system test, reusing existing test data and on-chip structures. The approach is demonstrated by the embedded test of an ASIC within an automotive control unit, utilizing manufacturing scan-tests.
BibTeX:
@inproceedings{CookUEWRD2012,
  author = {Cook, Alejandro and Ull, Dominik and Elm, Melanie and Wunderlich, Hans-Joachim and Randoll, H. and Döhren, S.},
  title = {{Reuse of Structural Volume Test Methods for In-System Testing of Automotive ASICs}},
  booktitle = {Proceedings of the 21st IEEE Asian Test Symposium (ATS'12)},
  publisher = {IEEE Computer Society},
  year = {2012},
  pages = {214--219},
  keywords = {system test, scan-test, in-field, automotive, electronic control unit},
  abstract = {The automotive industry has to deal with an increasing amount of electronics in today’s vehicles. This paper describes the advantages of structural tests during in-field system test, reusing existing test data and on-chip structures. The approach is demonstrated by the embedded test of an ASIC within an automotive control unit, utilizing manufacturing scan-tests.},
  doi = {http://dx.doi.org/10.1109/ATS.2012.32},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2012/ATS_CookUEWRD2012.pdf}
}
196. Variation-Aware Fault Grading
Czutro, A., Imhof, M.E., Jiang, J., Mumtaz, A., Sauer, M., Becker, B., Polian, I. and Wunderlich, H.-J.
Proceedings of the 21st IEEE Asian Test Symposium (ATS'12), Niigata, Japan, 19-22 November 2012, pp. 344-349
2012
DOI PDF 
Keywords: process variations, fault grading, Monte-Carlo, fault simulation, SAT-based, ATPG, GPGPU
Abstract: An iterative flow to generate test sets providing high fault coverage under extreme parameter variations is presented. The generation is guided by the novel metric of circuit coverage, calculated by massively parallel statistical fault simulation on GPGPUs. Experiments show that the statistical fault coverage of the generated test sets exceeds by far that achieved by standard approaches.
BibTeX:
@inproceedings{CzutrIJMSBPW2012,
  author = {Czutro, A. and Imhof, Michael E. and Jiang, J. and Mumtaz, Abdullah and Sauer, M. and Becker, Bernd and Polian, Ilia and Wunderlich, Hans-Joachim},
  title = {{Variation-Aware Fault Grading}},
  booktitle = {Proceedings of the 21st IEEE Asian Test Symposium (ATS'12)},
  publisher = {IEEE Computer Society},
  year = {2012},
  pages = {344--349},
  keywords = {process variations, fault grading, Monte-Carlo, fault simulation, SAT-based, ATPG, GPGPU},
  abstract = {An iterative flow to generate test sets providing high fault coverage under extreme parameter variations is presented. The generation is guided by the novel metric of circuit coverage, calculated by massively parallel statistical fault simulation on GPGPUs. Experiments show that the statistical fault coverage of the generated test sets exceeds by far that achieved by standard approaches.},
  doi = {http://dx.doi.org/10.1109/ATS.2012.14},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2012/ATS_CzutrIJMSBPW2012.pdf}
}
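Illustrative sketch (not the paper's GPGPU implementation): a statistical fault-coverage figure can be pictured as averaging per-instance fault coverage over many Monte-Carlo circuit instances with varied parameters, instead of grading the test set against a single nominal circuit. The detection model and numbers below are invented.

import random

def statistical_coverage(detects_in_instance, faults, instances):
    # Average fault coverage over Monte-Carlo circuit instances.
    per_instance = []
    for i in range(instances):
        detected = sum(1 for f in faults if detects_in_instance(f, i))
        per_instance.append(detected / len(faults))
    return sum(per_instance) / instances

faults = range(100)
# Toy detection model: each fault is detected in a random 90% of instances.
print(statistical_coverage(lambda f, i: random.random() < 0.9, faults, instances=50))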
195. Scan Test Power Simulation on GPGPUs
Holst, S., Schneider, E. and Wunderlich, H.-J.
Proceedings of the 21st IEEE Asian Test Symposium (ATS'12), Niigata, Japan, 19-22 November 2012, pp. 155-160
2012
DOI PDF 
Keywords: GPGPU, Data–Parallelism, Scan–Test, Power, Time–Simulation, Hazards, Pulse–Filtering
Abstract: The precise estimation of dynamic power consumption, power droop and temperature development during scan test requires a very large number of time–aware gate–level logic simulations. Until now, such characterizations have been feasible only for rather small designs or with reduced precision due to the high computational demands. We propose a new, throughput–optimized timing simulator running on GPGPUs that accelerates these tasks by more than two orders of magnitude and thus provides, for the first time, precise and comprehensive toggle data for industrial–sized designs and over long scan test operations. Hazards and pulse–filtering are supported for the first time in a GPGPU-accelerated simulator, and the system can easily be extended to even more sophisticated delay and power models.
BibTeX:
@inproceedings{HolstSW2012,
  author = {Holst, Stefan and Schneider, Eric and Wunderlich, Hans-Joachim},
  title = {{Scan Test Power Simulation on GPGPUs}},
  booktitle = {Proceedings of the 21st IEEE Asian Test Symposium (ATS'12)},
  publisher = {IEEE Computer Society},
  year = {2012},
  pages = {155--160},
  keywords = {GPGPU, Data–Parallelism, Scan–Test, Power, Time–Simulation, Hazards, Pulse–Filtering},
  abstract = {The precise estimation of dynamic power consumption, power droop and temperature development during scan test requires a very large number of time–aware gate–level logic simulations. Until now, such characterizations have been feasible only for rather small designs or with reduced precision due to the high computational demands. We propose a new, throughput–optimized timing simulator running on GPGPUs that accelerates these tasks by more than two orders of magnitude and thus provides, for the first time, precise and comprehensive toggle data for industrial–sized designs and over long scan test operations. Hazards and pulse–filtering are supported for the first time in a GPGPU-accelerated simulator, and the system can easily be extended to even more sophisticated delay and power models.},
  doi = {http://dx.doi.org/10.1109/ATS.2012.23},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2012/ATS_HolstSW2012.pdf}
}
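Illustrative sketch (not the paper's simulator): the quantity such a power simulation ultimately delivers can be pictured as a weighted toggle count per net, where hazard pulses contribute extra toggles that a zero-delay simulation would miss. The waveforms and load capacitances below are invented.

def weighted_toggles(waveforms, load_caps):
    # Sum capacitance-weighted toggle counts over all nets; proportional to dynamic power.
    total = 0.0
    for net, events in waveforms.items():          # events: list of (time, value)
        values = [v for _, v in events]
        toggles = sum(1 for a, b in zip(values, values[1:]) if a != b)
        total += toggles * load_caps[net]
    return total

waves = {"n1": [(0, 0), (3, 1), (5, 0), (6, 1)],   # includes a short hazard pulse
         "n2": [(0, 1), (7, 0)]}
caps = {"n1": 1.2, "n2": 0.8}
print(weighted_toggles(waves, caps))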
194. Modeling, Verification and Pattern Generation for Reconfigurable Scan Networks
Baranowski, R., Kochte, M.A. and Wunderlich, H.-J.
Proceedings of the IEEE International Test Conference (ITC'12), Anaheim, California, USA, 5-8 November 2012, pp. 1-9
2012
DOI PDF 
Keywords: Reconfigurable scan network, Pattern generation, Pattern retargeting, DFT, IJTAG, P1687
Abstract: Reconfigurable scan architectures allow flexible integration and efficient access to infrastructure in SoCs, e.g. for test, diagnosis, repair or debug. Such scan networks are often hierarchical and have complex structural and functional dependencies. For instance, the IEEE P1687 proposal, known as IJTAG, allows integration of multiplexed scan networks with arbitrary internal control signals. Common approaches for scan verification based on static structural analysis and functional simulation are not sufficient to ensure correct operation of these types of architectures. Hierarchy and flexibility may result in complex or even contradicting configuration requirements to access single elements. Sequential logic justification is therefore mandatory both to verify the validity of a scan network, and to generate the required access sequences. This work presents a formal method for verification of reconfigurable scan architectures, as well as pattern retargeting, i.e. generation of required scan-in data. The method is based on a formal model of structural and functional dependencies. Network verification and pattern retargeting is mapped to a Boolean satisfiability problem, which enables the use of efficient SAT solvers to exhaustively explore the search space of valid scan configurations.
BibTeX:
@inproceedings{BaranKW2012,
  author = {Baranowski, Rafal and Kochte, Michael A. and Wunderlich, Hans-Joachim},
  title = {{Modeling, Verification and Pattern Generation for Reconfigurable Scan Networks}},
  booktitle = {Proceedings of the IEEE International Test Conference (ITC'12)},
  publisher = {IEEE Computer Society},
  year = {2012},
  pages = {1--9},
  keywords = {Reconfigurable scan network, Pattern generation, Pattern retargeting, DFT, IJTAG, P1687},
  abstract = {Reconfigurable scan architectures allow flexible integration and efficient access to infrastructure in SoCs, e.g. for test, diagnosis, repair or debug. Such scan networks are often hierarchical and have complex structural and functional dependencies. For instance, the IEEE P1687 proposal, known as IJTAG, allows integration of multiplexed scan networks with arbitrary internal control signals. Common approaches for scan verification based on static structural analysis and functional simulation are not sufficient to ensure correct operation of these types of architectures. Hierarchy and flexibility may result in complex or even contradicting configuration requirements to access single elements. Sequential logic justification is therefore mandatory both to verify the validity of a scan network, and to generate the required access sequences. This work presents a formal method for verification of reconfigurable scan architectures, as well as pattern retargeting, i.e. generation of required scan-in data. The method is based on a formal model of structural and functional dependencies. Network verification and pattern retargeting is mapped to a Boolean satisfiability problem, which enables the use of efficient SAT solvers to exhaustively explore the search space of valid scan configurations.},
  doi = {http://dx.doi.org/10.1109/TEST.2012.6401555},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2012/ITC_BaranKW2012.pdf}
}
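Illustrative sketch (invented structure, not IEEE P1687 syntax): pattern retargeting amounts to justifying control values that place a target scan segment on the active scan path. The paper encodes this as a Boolean satisfiability problem; the toy example below simply enumerates the two control bits of a made-up network.

from itertools import product

def active_path(c0, c1):
    # Two cascaded scan multiplexers select one of three segments.
    first = "segA" if c0 == 0 else "segB"
    return [first] + (["segC"] if c1 == 1 else [])

target = "segC"
solutions = [(c0, c1) for c0, c1 in product((0, 1), repeat=2)
             if target in active_path(c0, c1)]
print(solutions)   # control assignments that make the target segment accessible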
193. Parallel Simulation of Apoptotic Receptor-Clustering on GPGPU Many-Core Architectures
Braun, C., Daub, M., Schöll, A., Schneider, G. and Wunderlich, H.-J.
Proceedings of the IEEE International Conference on Bioinformatics and Biomedicine (BIBM'12), Philadelphia, Pennsylvania, USA, 4-7 October 2012, pp. 1-6
2012
DOI PDF 
Keywords: GPGPU; parallel particle simulation; numerical modeling; apoptosis; receptor-clustering
Abstract: Apoptosis, the programmed cell death, is a physiological process that handles the removal of unwanted or damaged cells in living organisms. The process itself is initiated by signaling through tumor necrosis factor (TNF) receptors and ligands, which form clusters on the cell membrane. The exact function of this process is not yet fully understood and currently subject of basic research. Different mathematical models have been developed to describe and simulate the apoptotic receptor-clustering.
In this interdisciplinary work, a previously introduced model of the apoptotic receptor-clustering has been extended by a new receptor type to allow a more precise description and simulation of the signaling process. Due to the high computational requirements of the model, an efficient algorithmic mapping to a modern many-core GPGPU architecture has been developed. Such architectures enable high-performance computing (HPC) simulation tasks on the desktop at low costs. The developed mapping reduces average simulation times from months to days (peak speedup of 256x), allowing the productive use of the model in research.
BibTeX:
@inproceedings{BraunDSSW2012,
  author = {Braun, Claus and Daub, Markus and Schöll, Alexander and Schneider, Guido and Wunderlich, Hans-Joachim},
  title = {{Parallel Simulation of Apoptotic Receptor-Clustering on GPGPU Many-Core Architectures}},
  booktitle = {Proceedings of the IEEE International Conference on Bioinformatics and Biomedicine (BIBM'12)},
  year = {2012},
  pages = {1--6},
  keywords = {GPGPU; parallel particle simulation; numerical modeling; apoptosis; receptor-clustering},
  abstract = {Apoptosis, the programmed cell death, is a physiological process that handles the removal of unwanted or damaged cells in living organisms. The process itself is initiated by signaling through tumor necrosis factor (TNF) receptors and ligands, which form clusters on the cell membrane. The exact function of this process is not yet fully understood and currently subject of basic research. Different mathematical models have been developed to describe and simulate the apoptotic receptor-clustering.
In this interdisciplinary work, a previously introduced model of the apoptotic receptor-clustering has been extended by a new receptor type to allow a more precise description and simulation of the signaling process. Due to the high computational requirements of the model, an efficient algorithmic mapping to a modern many-core GPGPU architecture has been developed. Such architectures enable high-performance computing (HPC) simulation tasks on the desktop at low costs. The developed mapping reduces average simulation times from months to days (peak speedup of 256x), allowing the productive use of the model in research.},
  doi = {http://dx.doi.org/10.1109/BIBM.2012.6392661},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2012/BIBM_BraunDSSW2012.pdf}
}
192. Structural Test and Diagnosis for Graceful Degradation of NoC Switches
Dalirsani, A., Holst, S., Elm, M. and Wunderlich, H.-J.
Journal of Electronic Testing: Theory and Applications (JETTA)
Vol. 28(6), October 2012, pp. 831-841
2012
DOI PDF 
Keywords: Network-on-Chip, Graceful Degradation, Logic Diagnosis, Performability
Abstract: Networks-on-Chip (NoCs) are implicitly fault tolerant and due to their inherent redundancy they can overcome defective cores, links and switches. This effect can be used to increase yield at the cost of reduced performance. In this paper, a new diagnosis method based on the standard flow of industrial volume testing is presented, which is able to identify the intact functions of a defective network switch rather than providing only a pass/fail result for the complete switch. To achieve this, the new method combines for the first time the precision of structural testing with information on the functional behavior in the presence of defects. This makes it possible to disable defective parts of a switch after production test and use the intact functions. Thereby, only a minimal performance decrease is induced while the yield is increased. According to the experimental results, the method improves the performability of NoCs since 56.86 % and 72.42 % of defects in two typical switch models only impair one switch port. Unlike previous methods for implementing fault tolerant switches, the developed technique does not impose any additional area overhead and is compatible with many common switch designs.
BibTeX:
@article{DalirHEW2012,
  author = {Dalirsani, Atefe and Holst, Stefan and Elm, Melanie and Wunderlich, Hans-Joachim},
  title = {{Structural Test and Diagnosis for Graceful Degradation of NoC Switches}},
  journal = {Journal of Electronic Testing: Theory and Applications (JETTA)},
  publisher = {Springer-Verlag},
  year = {2012},
  volume = {28},
  number = {6},
  pages = {831--841},
  keywords = {Network-on-Chip, Graceful Degradation, Logic Diagnosis, Performability},
  abstract = {Networks-on-Chip (NoCs) are implicitly fault tolerant and due to their inherent redundancy they can overcome defective cores, links and switches. This effect can be used to increase yield at the cost of reduced performance. In this paper, a new diagnosis method based on the standard flow of industrial volume testing is presented, which is able to identify the intact functions of a defective network switch rather than providing only a pass/fail result for the complete switch. To achieve this, the new method combines for the first time the precision of structural testing with information on the functional behavior in the presence of defects. This makes it possible to disable defective parts of a switch after production test and use the intact functions. Thereby, only a minimal performance decrease is induced while the yield is increased. According to the experimental results, the method improves the performability of NoCs since 56.86 % and 72.42 % of defects in two typical switch models only impair one switch port. Unlike previous methods for implementing fault tolerant switches, the developed technique does not impose any additional area overhead and is compatible with many common switch designs.},
  doi = {http://dx.doi.org/10.1007/s10836-012-5329-9},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2012/JETTA_DalirHEW2012.pdf}
}
191. Acceleration of Monte-Carlo Molecular Simulations on Hybrid Computing Architectures
Braun, C., Holst, S., Wunderlich, H.-J., Castillo, J.M. and Gross, J.
Proceedings of the 30th IEEE International Conference on Computer Design (ICCD'12), Montreal, Canada, 30 September-3 October 2012, pp. 207-212
2012
DOI PDF 
Keywords: Hybrid Computer Architectures; GPGPU; Markov-Chain Monte-Carlo; Molecular Simulation; Thermodynamics
Abstract: Markov-Chain Monte-Carlo (MCMC) methods are an important class of simulation techniques, which execute a sequence of simulation steps, where each new step depends on the previous ones. Due to this fundamental dependency, MCMC methods are inherently hard to parallelize on any architecture. The upcoming generations of hybrid CPU/GPGPU architectures with their multi-core CPUs and tightly coupled many-core GPGPUs provide new acceleration opportunities especially for MCMC methods, if the new degrees of freedom are exploited correctly.
In this paper, the outcomes of an interdisciplinary collaboration are presented, which focused on the parallel mapping of a MCMC molecular simulation from thermodynamics to hybrid CPU/GPGPU computing systems. While the mapping is designed for upcoming hybrid architectures, the implementation of this approach on an NVIDIA Tesla system already leads to a substantial speedup of more than 87x despite the additional communication overheads.
BibTeX:
@inproceedings{BraunHWCG2012,
  author = {Braun, Claus and Holst, Stefan and Wunderlich, Hans-Joachim and Castillo, Juan Manuel and Gross, Joachim},
  title = {{Acceleration of Monte-Carlo Molecular Simulations on Hybrid Computing Architectures}},
  booktitle = {Proceedings of the 30th IEEE International Conference on Computer Design (ICCD'12)},
  publisher = {IEEE Computer Society},
  year = {2012},
  pages = {207--212},
  keywords = {Hybrid Computer Architectures; GPGPU; Markov-Chain Monte-Carlo; Molecular Simulation; Thermodynamics},
  abstract = {Markov-Chain Monte-Carlo (MCMC) methods are an important class of simulation techniques, which execute a sequence of simulation steps, where each new step depends on the previous ones. Due to this fundamental dependency, MCMC methods are inherently hard to parallelize on any architecture. The upcoming generations of hybrid CPU/GPGPU architectures with their multi-core CPUs and tightly coupled many-core GPGPUs provide new acceleration opportunities especially for MCMC methods, if the new degrees of freedom are exploited correctly. 
In this paper, the outcomes of an interdisciplinary collaboration are presented, which focused on the parallel mapping of a MCMC molecular simulation from thermodynamics to hybrid CPU/GPGPU computing systems. While the mapping is designed for upcoming hybrid architectures, the implementation of this approach on an NVIDIA Tesla system already leads to a substantial speedup of more than 87x despite the additional communication overheads.},
  doi = {http://dx.doi.org/10.1109/ICCD.2012.6378642},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2012/ICCD_BraunHWCG2012.pdf}
}
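Illustrative sketch (generic Metropolis step, not the paper's thermodynamic model): each MCMC step depends on the previous state, which is the dependency that makes the chain hard to parallelize and motivates offloading the expensive per-step energy evaluation to the GPGPU. Energy function, proposal and parameters below are invented.

import math, random

def metropolis(energy, state, proposal, beta, steps):
    # Standard Metropolis acceptance rule; each iteration uses the previous state.
    e = energy(state)
    for _ in range(steps):
        candidate = proposal(state)
        e_new = energy(candidate)
        if e_new <= e or random.random() < math.exp(-beta * (e_new - e)):
            state, e = candidate, e_new
    return state

# Toy 1-D example: harmonic "energy" and Gaussian proposals.
final = metropolis(lambda x: x * x, 5.0, lambda x: x + random.gauss(0, 0.5), beta=1.0, steps=10_000)
print(final)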
190. Transparent Structural Online Test for Reconfigurable Systems
Abdelfattah, M.S., Bauer, L., Braun, C., Imhof, M.E., Kochte, M.A., Zhang, H., Henkel, J. and Wunderlich, H.-J.
Proceedings of the 18th IEEE International On-Line Testing Symposium (IOLTS'12), Sitges, Spain, 27-29 June 2012, pp. 37-42
2012
DOI PDF 
Keywords: FPGA; Reconfigurable Architectures; Online Test
Abstract: FPGA-based reconfigurable systems allow the online adaptation to dynamically changing runtime requirements. However, the reliability of modern FPGAs is threatened by latent defects and aging effects. Hence, it is mandatory to ensure the reliable operation of the FPGA’s reconfigurable fabric. This can be achieved by periodic or on-demand online testing. In this paper, a system-integrated, transparent structural online test method for runtime reconfigurable systems is proposed. The required tests are scheduled like functional workloads, and thorough optimizations of the test overhead reduce the performance impact. The proposed scheme has been implemented on a reconfigurable system. The results demonstrate that thorough testing of the reconfigurable fabric can be achieved at negligible performance impact on the application.
BibTeX:
@inproceedings{AbdelBBIKZHW2012,
  author = {Abdelfattah, Mohamed S. and Bauer, Lars and Braun, Claus and Imhof, Michael E. and Kochte, Michael A. and Zhang, Hongyan and Henkel, Jörg and Wunderlich, Hans-Joachim},
  title = {{Transparent Structural Online Test for Reconfigurable Systems}},
  booktitle = {Proceedings of the 18th IEEE International On-Line Testing Symposium (IOLTS'12)},
  publisher = {IEEE Computer Society},
  year = {2012},
  pages = {37--42},
  keywords = {FPGA; Reconfigurable Architectures; Online Test},
  abstract = {FPGA-based reconfigurable systems allow the online adaptation to dynamically changing runtime requirements. However, the reliability of modern FPGAs is threatened by latent defects and aging effects. Hence, it is mandatory to ensure the reliable operation of the FPGA’s reconfigurable fabric. This can be achieved by periodic or on-demand online testing. In this paper, a system-integrated, transparent structural online test method for runtime reconfigurable systems is proposed. The required tests are scheduled like functional workloads, and thorough optimizations of the test overhead reduce the performance impact. The proposed scheme has been implemented on a reconfigurable system. The results demonstrate that thorough testing of the reconfigurable fabric can be achieved at negligible performance impact on the application.},
  doi = {http://dx.doi.org/10.1109/IOLTS.2012.6313838},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2012/IOLTS_AbdelBBIKZHW2012.pdf}
}
189. OTERA: Online Test Strategies for Reliable Reconfigurable Architectures
Bauer, L., Braun, C., Imhof, M.E., Kochte, M.A., Zhang, H., Wunderlich, H.-J. and Henkel, J.
Proceedings of the NASA/ESA Conference on Adaptive Hardware and Systems (AHS'12), Erlangen, Germany, 25-28 June 2012, pp. 38-45
2012
DOI PDF 
Abstract: FPGA-based reconfigurable systems allow the online adaptation to dynamically changing runtime requirements. However, the reliability of FPGAs, which are manufactured in latest technologies, is threatened not only by soft errors, but also by aging effects and latent defects. To ensure reliable reconfiguration, it is mandatory to guarantee the correct operation of the underlying reconfigurable fabric. This can be achieved by periodic or on-demand online testing. The OTERA project develops and evaluates components and strategies for reconfigurable systems that feature reliable reconfiguration. The research focus ranges from structural online tests for the FPGA infrastructure and functional online tests for the configured functionality up to the resource management and test scheduling. This paper gives an overview of the project tasks and presents first results.
BibTeX:
@inproceedings{BauerBIKZWH2012,
  author = {Bauer, Lars and Braun, Claus and Imhof, Michael E. and Kochte, Michael A. and Zhang, Hongyan and Wunderlich, Hans-Joachim and Henkel, Jörg},
  title = {{OTERA: Online Test Strategies for Reliable Reconfigurable Architectures}},
  booktitle = {Proceedings of the NASA/ESA Conference on Adaptive Hardware and Systems (AHS'12)},
  publisher = {IEEE Computer Society},
  year = {2012},
  pages = {38--45},
  abstract = {FPGA-based reconfigurable systems allow the online adaptation to dynamically changing runtime requirements. However, the reliability of FPGAs, which are manufactured in latest technologies, is threatened not only by soft errors, but also by aging effects and latent defects. To ensure reliable reconfiguration, it is mandatory to guarantee the correct operation of the underlying reconfigurable fabric. This can be achieved by periodic or on-demand online testing. The OTERA project develops and evaluates components and strategies for reconfigurable systems that feature reliable reconfiguration. The research focus ranges from structural online tests for the FPGA infrastructure and functional online tests for the configured functionality up to the resource management and test scheduling. This paper gives an overview of the project tasks and presents first results.},
  doi = {http://dx.doi.org/10.1109/AHS.2012.6268667},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2012/AHS_BauerBIKZWH2012.pdf}
}
188. Built-in Self-Diagnosis Exploiting Strong Diagnostic Windows in Mixed-Mode Test
Cook, A., Hellebrand, S. and Wunderlich, H.-J.
Proceedings of the 17th IEEE European Test Symposium (ETS'12), Annecy, France, 28 May-1 June 2012, pp. 146-151
2012
DOI PDF 
Keywords: Built-in Diagnosis; Design for Diagnosis
Abstract: Efficient diagnosis procedures are crucial both for volume and for in-field diagnosis. In either case the underlying test strategy should provide a high coverage of realistic fault mechanisms and support a low-cost implementation. Built-in self-diagnosis (BISD) is a promising solution, if the diagnosis procedure is fully in line with the test flow. However, most known BISD schemes require multiple test runs or modifications of the standard scan-based test infrastructure. Some recent schemes circumvent these problems, but they focus on deterministic patterns to limit the storage requirements for diagnostic data. Thus, they cannot exploit the benefits of a mixed-mode test such as high coverage of non-target faults and reduced test data storage. This paper proposes a BISD scheme using mixed-mode patterns and partitioning the test sequence into “weak” and “strong” diagnostic windows, which are treated differently during diagnosis. As the experimental results show, this improves the coverage of non-target faults and enhances the diagnostic resolution compared to state-of-the-art approaches. At the same time the overall storage overhead for input and response data is considerably reduced.
BibTeX:
@inproceedings{CookHW2012,
  author = {Cook, Alejandro and Hellebrand, Sybille and Wunderlich, Hans-Joachim},
  title = {{Built-in Self-Diagnosis Exploiting Strong Diagnostic Windows in Mixed-Mode Test}},
  booktitle = {Proceedings of the 17th IEEE European Test Symposium (ETS'12)},
  publisher = {IEEE Computer Society},
  year = {2012},
  pages = {146--151},
  keywords = {Built-in Diagnosis; Design for Diagnosis},
  abstract = {Efficient diagnosis procedures are crucial both for volume and for in-field diagnosis. In either case the underlying test strategy should provide a high coverage of realistic fault mechanisms and support a low-cost implementation. Built-in self-diagnosis (BISD) is a promising solution, if the diagnosis procedure is fully in line with the test flow. However, most known BISD schemes require multiple test runs or modifications of the standard scan-based test infrastructure. Some recent schemes circumvent these problems, but they focus on deterministic patterns to limit the storage requirements for diagnostic data. Thus, they cannot exploit the benefits of a mixed-mode test such as high coverage of non-target faults and reduced test data storage. This paper proposes a BISD scheme using mixed-mode patterns and partitioning the test sequence into “weak” and “strong” diagnostic windows, which are treated differently during diagnosis. As the experimental results show, this improves the coverage of non-target faults and enhances the diagnostic resolution compared to state-of-the-art approaches. At the same time the overall storage overhead for input and response data is considerably reduced.},
  doi = {http://dx.doi.org/10.1109/ETS.2012.6233025},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2012/ETS_CookHW2012.pdf}
}
187. Efficient System-Level Aging Prediction
Hatami, N., Baranowski, R., Prinetto, P. and Wunderlich, H.-J.
Proceedings of the 17th IEEE European Test Symposium (ETS'12), Annecy, France, 28 May-1 June 2012, pp. 164-169
2012
DOI PDF 
Keywords: Non-functional properties; Transaction Level Modeling (TLM); mixed-level simulation; aging analysis; Negative Bias Temperature Instability (NBTI)
Abstract: Non-functional properties (NFPs) of integrated circuits include reliability, vulnerability, power consumption or heat dissipation. Accurate NFP prediction over long periods of system operation poses a great challenge due to prohibitive simulation costs. For instance, in case of aging estimation, the existing low-level models are accurate but not efficient enough for simulation of complex designs. On the other hand, existing techniques for fast high-level simulation do not provide enough details for NFP analysis. The goal of this paper is to bridge this gap by combining the accuracy of low-level models with high-level simulation speed. We introduce an efficient mixed-level NFP prediction methodology that considers both the structure and application of a system. The system is modeled at transaction-level to enable high simulation speed. To maintain accuracy, NFP assessment for cores under analysis is conducted at gate-level by cycle-accurate simulation. We propose effective techniques for cross-level synchronization and idle simulation speed-up. As an example, we apply the technique to analyze aging caused by Negative Bias Temperature Instability in order to identify reliability hot spots. As case studies, several applications on an SoC platform are analyzed. Compared to conventional approaches, the proposed method is from 7 up to 400 times faster with mean error below 0.006%.
BibTeX:
@inproceedings{HatamBPW2012,
  author = {Hatami, Nadereh and Baranowski, Rafal and Prinetto, Paolo and Wunderlich, Hans-Joachim},
  title = {{Efficient System-Level Aging Prediction}},
  booktitle = {Proceedings of the 17th IEEE European Test Symposium (ETS'12)},
  publisher = {IEEE Computer Society},
  year = {2012},
  pages = {164--169},
  keywords = {Non-functional properties; Transaction Level Modeling (TLM); mixed-level simulation; aging analysis; Negative Bias Temperature Instability (NBTI)},
  abstract = {Non-functional properties (NFPs) of integrated circuits include reliability, vulnerability, power consumption or heat dissipation. Accurate NFP prediction over long periods of system operation poses a great challenge due to prohibitive simulation costs. For instance, in case of aging estimation, the existing low-level models are accurate but not efficient enough for simulation of complex designs. On the other hand, existing techniques for fast high-level simulation do not provide enough details for NFP analysis. The goal of this paper is to bridge this gap by combining the accuracy of low-level models with high-level simulation speed. We introduce an efficient mixed-level NFP prediction methodology that considers both the structure and application of a system. The system is modeled at transaction-level to enable high simulation speed. To maintain accuracy, NFP assessment for cores under analysis is conducted at gate-level by cycle-accurate simulation. We propose effective techniques for cross-level synchronization and idle simulation speed-up. As an example, we apply the technique to analyze aging caused by Negative Bias Temperature Instability in order to identify reliability hot spots. As case studies, several applications on an SoC platform are analyzed. Compared to conventional approaches, the proposed method is from 7 up to 400 times faster with mean error below 0.006%.},
  doi = {http://dx.doi.org/10.1109/ETS.2012.6233028},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2012/ETS_HatamBPW2012.pdf}
}
186. Exact Stuck-at Fault Classification in Presence of Unknowns
Hillebrecht, S., Kochte, M.A., Wunderlich, H.-J. and Becker, B.
Proceedings of the 17th IEEE European Test Symposium (ETS'12), Annecy, France, 28 May-1 June 2012, pp. 98-103
2012
DOI PDF 
Keywords: Unknown values; simulation pessimism; exact fault simulation; SAT
Abstract: Fault simulation is an essential tool in electronic design automation. The accuracy of the computation of fault coverage in classic n-valued simulation algorithms is compromised by unknown (X) values. This results in a pessimistic underestimation of the coverage, and overestimation of unknown (X) values at the primary and pseudo-primary outputs. This work proposes the first stuck-at fault simulation algorithm free of any simulation pessimism in presence of unknowns. The SAT-based algorithm exactly classifies any fault and distinguishes between definite and possible detects. The pessimism w. r. t. unknowns present in classic algorithms is discussed in the experimental results on ISCAS benchmark and industrial circuits. The applicability of our algorithm to large industrial circuits is demonstrated.
BibTeX:
@inproceedings{HilleKWB2012,
  author = {Hillebrecht, Stefan and Kochte, Michael A. and Wunderlich, Hans-Joachim and Becker, Bernd},
  title = {{Exact Stuck-at Fault Classification in Presence of Unknowns}},
  booktitle = {Proceedings of the 17th IEEE European Test Symposium (ETS'12)},
  publisher = {IEEE Computer Society},
  year = {2012},
  pages = {98--103},
  keywords = {Unknown values; simulation pessimism; exact fault simulation; SAT},
  abstract = {Fault simulation is an essential tool in electronic design automation. The accuracy of the computation of fault coverage in classic n-valued simulation algorithms is compromised by unknown (X) values. This results in a pessimistic underestimation of the coverage, and overestimation of unknown (X) values at the primary and pseudo-primary outputs. This work proposes the first stuck-at fault simulation algorithm free of any simulation pessimism in presence of unknowns. The SAT-based algorithm exactly classifies any fault and distinguishes between definite and possible detects. The pessimism w. r. t. unknowns present in classic algorithms is discussed in the experimental results on ISCAS benchmark and industrial circuits. The applicability of our algorithm to large industrial circuits is demonstrated.},
  doi = {http://dx.doi.org/10.1109/ETS.2012.6233017},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2012/ETS_HilleKWB2012.pdf}
}
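Illustrative sketch (not the paper's SAT-based algorithm): the distinction between definite and possible detects can be shown by enumerating both values of a single X input and comparing good and faulty outputs. The toy circuit and fault below are invented.

def good(a, x):   return a & x     # toy circuit: y = a AND x
def faulty(a, x): return a & 1     # the x input of the AND is stuck-at-1

def classify(a):
    # Definite detect: outputs differ for every value of X; possible detect: for at least one.
    diffs = [good(a, x) != faulty(a, x) for x in (0, 1)]
    if all(diffs): return "definite detect"
    if any(diffs): return "possible detect"
    return "not detected"

print(classify(a=1))   # differs only for x=0 -> possible detect
print(classify(a=0))   # outputs equal for both x -> not detected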
185. A Pseudo-Dynamic Comparator for Error Detection in Fault Tolerant Architectures
Tran, D.A., Virazel, A., Bosio, A., Dilillo, L., Girard, P., Todri, A., Imhof, M.E. and Wunderlich, H.-J.
Proceedings of the 30th IEEE VLSI Test Symposium (VTS'12), Hyatt Maui, Hawaii, USA, 23-25 April 2012, pp. 50-55
2012
DOI PDF 
Keywords: Robustness; Soft error; Timing error; Fault tolerance; Duplication; Comparison; Power consumption
Abstract: Although CMOS technology scaling offers many advantages, it suffers from robustness problems caused by hard, soft and timing errors. The robustness of future CMOS technology nodes must be improved and the use of fault tolerant architectures is probably the most viable solution. In this context, the Duplication/Comparison scheme is widely used for error detection. Traditionally, this scheme uses a static comparator structure that detects hard errors. However, it is not effective for soft and timing error detection due to the possible masking of glitches by the comparator itself. To solve this problem, we propose a pseudo-dynamic comparator architecture that combines a dynamic CMOS transition detector and a static comparator. Experimental results show that the proposed comparator detects not only hard errors but also small glitches related to soft and timing errors. Moreover, its dynamic characteristics allow reducing the power consumption while keeping an equivalent silicon area compared to a static comparator. This study is the first step towards a full fault tolerant approach targeting robustness improvement of CMOS logic circuits.
BibTeX:
@inproceedings{TranVBDGTIW2012,
  author = {Tran, Duc Anh and Virazel, Arnaud and Bosio, Alberto and Dilillo, Luigi and Girard, Patrick and Todri, Aida and Imhof, Michael E. and Wunderlich, Hans-Joachim},
  title = {{A Pseudo-Dynamic Comparator for Error Detection in Fault Tolerant Architectures}},
  booktitle = {Proceedings of the 30th IEEE VLSI Test Symposium (VTS'12)},
  publisher = {IEEE Computer Society},
  year = {2012},
  pages = {50--55},
  keywords = {Robustness; Soft error; Timing error; Fault tolerance; Duplication; Comparison; Power consumption},
  abstract = {Although CMOS technology scaling offers many advantages, it suffers from robustness problems caused by hard, soft and timing errors. The robustness of future CMOS technology nodes must be improved and the use of fault tolerant architectures is probably the most viable solution. In this context, the Duplication/Comparison scheme is widely used for error detection. Traditionally, this scheme uses a static comparator structure that detects hard errors. However, it is not effective for soft and timing error detection due to the possible masking of glitches by the comparator itself. To solve this problem, we propose a pseudo-dynamic comparator architecture that combines a dynamic CMOS transition detector and a static comparator. Experimental results show that the proposed comparator detects not only hard errors but also small glitches related to soft and timing errors. Moreover, its dynamic characteristics allow reducing the power consumption while keeping an equivalent silicon area compared to a static comparator. This study is the first step towards a full fault tolerant approach targeting robustness improvement of CMOS logic circuits.},
  doi = {http://dx.doi.org/10.1109/VTS.2012.6231079},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2012/VTS_TranVBDGTIW2012.pdf}
}
184. Built-in Self-Diagnosis Targeting Arbitrary Defects with Partial Pseudo-Exhaustive Test
Cook, A., Hellebrand, S., Imhof, M.E., Mumtaz, A. and Wunderlich, H.-J.
Proceedings of the 13th IEEE Latin-American Test Workshop (LATW'12), Quito, Ecuador, 10-13 April 2012, pp. 1-4
2012
DOI PDF 
Keywords: Built-in Self-Test; Pseudo-Exhaustive Test; Built-in Self-Diagnosis
Abstract: Pseudo-exhaustive test completely verifies all output functions of a combinational circuit, which provides a high coverage of non-target faults and allows an efficient on-chip implementation. To avoid long test times caused by large output cones, partial pseudo-exhaustive test (P-PET) has been proposed recently. Here only cones with a limited number of inputs are tested exhaustively, and the remaining faults are targeted with deterministic patterns. Using P-PET patterns for built-in diagnosis, however, is challenging because of the large amount of associated response data. This paper presents a built-in diagnosis scheme which only relies on sparsely distributed data in the response sequence, but still preserves the benefits of P-PET.
BibTeX:
@inproceedings{CookHIMW2012,
  author = {Cook, Alejandro and Hellebrand, Sybille and Imhof, Michael E. and Mumtaz, Abdullah and Wunderlich, Hans-Joachim},
  title = {{Built-in Self-Diagnosis Targeting Arbitrary Defects with Partial Pseudo-Exhaustive Test}},
  booktitle = {Proceedings of the 13th IEEE Latin-American Test Workshop (LATW'12)},
  publisher = {IEEE Computer Society},
  year = {2012},
  pages = {1--4},
  keywords = {Built-in Self-Test; Pseudo-Exhaustive Test; Built-in Self-Diagnosis},
  abstract = {Pseudo-exhaustive test completely verifies all output functions of a combinational circuit, which provides a high coverage of non-target faults and allows an efficient on-chip implementation. To avoid long test times caused by large output cones, partial pseudo-exhaustive test (P-PET) has been proposed recently. Here only cones with a limited number of inputs are tested exhaustively, and the remaining faults are targeted with deterministic patterns. Using P-PET patterns for built-in diagnosis, however, is challenging because of the large amount of associated response data. This paper presents a built-in diagnosis scheme which only relies on sparsely distributed data in the response sequence, but still preserves the benefits of P-PET.},
  doi = {http://dx.doi.org/10.1109/LATW.2012.6261229},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2012/LATW_CookHIMW2012.pdf}
}
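Illustrative sketch (toy cone, invented size limit): partial pseudo-exhaustive test applies all 2^k input combinations to each output cone with at most k inputs and leaves larger cones to deterministic patterns.

from itertools import product

def exhaustive_patterns(cone_inputs, limit=20):
    # Cones with too many inputs are skipped here and handled by deterministic patterns.
    if len(cone_inputs) > limit:
        return None
    return [dict(zip(cone_inputs, bits)) for bits in product((0, 1), repeat=len(cone_inputs))]

print(len(exhaustive_patterns(["a", "b", "c"])))   # 8 patterns for a 3-input cone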
183. Diagnostic Test of Robust Circuits
Cook, A., Hellebrand, S., Indlekofer, T. and Wunderlich, H.-J.
Proceedings of the 20th IEEE Asian Test Symposium (ATS'11), New Delhi, India, 20-23 November 2011, pp. 285-290
2011
DOI PDF 
Keywords: Robust Circuits; Built-in Self-Test; Built-in Self-Diagnosis; Time Redundancy
Abstract: Robust circuits are able to tolerate certain faults, but also pose additional challenges for test and diagnosis. To improve yield, the test must distinguish between critical faults and faults that can be compensated during system operation; in addition, efficient diagnosis procedures are needed to support yield ramp-up in the case of critical faults. Previous work on circuits with time redundancy has shown that “signature rollback” can distinguish critical permanent faults from uncritical transient faults. The test is partitioned into shorter sessions, and a rollback is triggered immediately after a faulty session. If the repeated session shows the correct result, then a transient fault is assumed. The reference values for the sessions are represented in a very compact format. Storing only a few bits characterizing the MISR state over time can provide the same quality as storing the complete signature. In this work, the signature rollback scheme is extended to an integrated test and diagnosis procedure. It is shown that a single test run with highly compacted reference data is sufficient to reach a diagnostic resolution comparable to that of a diagnostic session without any data compaction.
BibTeX:
@inproceedings{CookHIW2011,
  author = {Cook, Alejandro and Hellebrand, Sybille and Indlekofer, Thomas and Wunderlich, Hans-Joachim},
  title = {{Diagnostic Test of Robust Circuits}},
  booktitle = {Proceedings of the 20th IEEE Asian Test Symposium (ATS'11)},
  publisher = {IEEE Computer Society},
  year = {2011},
  pages = {285--290},
  keywords = {Robust Circuits; Built-in Self-Test; Built-in Self-Diagnosis; Time Redundancy},
  abstract = {Robust circuits are able to tolerate certain faults, but also pose additional challenges for test and diagnosis. To improve yield, the test must distinguish between critical faults and faults that can be compensated during system operation; in addition, efficient diagnosis procedures are needed to support yield ramp-up in the case of critical faults. Previous work on circuits with time redundancy has shown that “signature rollback” can distinguish critical permanent faults from uncritical transient faults. The test is partitioned into shorter sessions, and a rollback is triggered immediately after a faulty session. If the repeated session shows the correct result, then a transient fault is assumed. The reference values for the sessions are represented in a very compact format. Storing only a few bits characterizing the MISR state over time can provide the same quality as storing the complete signature. In this work, the signature rollback scheme is extended to an integrated test and diagnosis procedure. It is shown that a single test run with highly compacted reference data is sufficient to reach a diagnostic resolution comparable to that of a diagnostic session without any data compaction.},
  doi = {http://dx.doi.org/10.1109/ATS.2011.55},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2011/ATS_CookHIW2011.pdf}
}
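Illustrative sketch (invented feedback taps and response data, not the paper's compactor): signature rollback compares a per-session signature against a stored reference, repeats a failing session once, and treats a fault that disappears on repetition as transient.

def misr_signature(responses, width=8, taps=(0, 2, 3, 4)):
    # Toy linear signature register: shift with XOR feedback, then fold in each response word.
    sig = 0
    for r in responses:
        feedback = 0
        for t in taps:
            feedback ^= (sig >> t) & 1
        sig = ((sig << 1) | feedback) & ((1 << width) - 1)
        sig ^= r & ((1 << width) - 1)
    return sig

golden = misr_signature([0x12, 0x34, 0x56])
first  = misr_signature([0x12, 0x35, 0x56])   # bit flip: session fails
rerun  = misr_signature([0x12, 0x34, 0x56])   # repetition passes
print(golden, first, rerun,
      "transient" if (first != golden and rerun == golden) else "permanent")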
182. Efficient BDD-based Fault Simulation in Presence of Unknown Values
Kochte, M.A., Kundu, S., Miyase, K., Wen, X. and Wunderlich, H.-J.
Proceedings of the 20th IEEE Asian Test Symposium (ATS'11), New Delhi, India, 20-23 November 2011, pp. 383-388
2011
DOI PDF 
Keywords: Unknown values; X propagation; precise fault simulation; symbolic simulation; BDD
Abstract: Unknown (X) values, originating from memories, clock domain boundaries or A/D interfaces, may compromise test signatures and fault coverage. Classical logic and fault simulation algorithms are pessimistic w.r.t. the propagation of X values in the circuit. This work proposes efficient hybrid logic and stuck-at fault simulation algorithms which combine heuristics and local BDDs to increase simulation accuracy. Experimental results on benchmark and large industrial circuits show significantly increased fault coverage and low runtime. The achieved simulation precision is quantified for the first time.
BibTeX:
@inproceedings{KochtKMWW2011,
  author = {Kochte, Michael A. and Kundu, S. and Miyase, Kohei and Wen, Xiaoqing and Wunderlich, Hans-Joachim},
  title = {{Efficient BDD-based Fault Simulation in Presence of Unknown Values}},
  booktitle = {Proceedings of the 20th IEEE Asian Test Symposium (ATS'11)},
  publisher = {IEEE Computer Society},
  year = {2011},
  pages = {383--388},
  keywords = {Unknown values; X propagation; precise fault simulation; symbolic simulation; BDD},
  abstract = {Unknown (X) values, originating from memories, clock domain boundaries or A/D interfaces, may compromise test signatures and fault coverage. Classical logic and fault simulation algorithms are pessimistic w.r.t. the propagation of X values in the circuit. This work proposes efficient hybrid logic and stuck-at fault simulation algorithms which combine heuristics and local BDDs to increase simulation accuracy. Experimental results on benchmark and large industrial circuits show significantly increased fault coverage and low runtime. The achieved simulation precision is quantified for the first time.},
  doi = {http://dx.doi.org/10.1109/ATS.2011.52},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2011/ATS_KochtKMWW2011.pdf}
}
181. Embedded Test for Highly Accurate Defect Localization
Mumtaz, A., Imhof, M.E., Holst, S. and Wunderlich, H.-J.
Proceedings of the 20th IEEE Asian Test Symposium (ATS'11), New Delhi, India, 20-23 November 2011, pp. 213-218
2011
DOI PDF 
Keywords: BIST; Pseudo-Exhaustive Testing; Diagnosis; Debug
Abstract: Modern diagnosis algorithms are able to identify the defective circuit structure directly from existing fail data without being limited to any specialized fault models. Such algorithms however require test patterns with a high defect coverage, posing a major challenge particularly for embedded testing.
In mixed-mode embedded test, a large number of pseudorandom (PR) patterns are applied prior to deterministic test patterns. Partial Pseudo-Exhaustive Testing (P-PET) replaces these pseudo-random patterns during embedded testing by partial pseudo-exhaustive patterns to test a large portion of a circuit fault-model independently. The overall defect coverage is optimized compared to random testing or deterministic tests using the stuck-at fault model while maintaining a comparable hardware overhead and the same test application time.
This work for the first time combines P-PET with a fault model independent diagnosis algorithm and shows that arbitrary defects can be diagnosed on average much more precisely than with standard embedded testing. The results are compared to random pattern testing and deterministic testing targeting stuck-at faults.
BibTeX:
@inproceedings{MumtaIHW2011,
  author = {Mumtaz, Abdullah and Imhof, Michael E. and Holst, Stefan and Wunderlich, Hans-Joachim},
  title = {{Embedded Test for Highly Accurate Defect Localization}},
  booktitle = {Proceedings of the 20th IEEE Asian Test Symposium (ATS'11)},
  publisher = {IEEE Computer Society},
  year = {2011},
  pages = {213--218},
  keywords = {BIST; Pseudo-Exhaustive Testing; Diagnosis; Debug},
  abstract = {Modern diagnosis algorithms are able to identify the defective circuit structure directly from existing fail data without being limited to any specialized fault models. Such algorithms however require test patterns with a high defect coverage, posing a major challenge particularly for embedded testing.
In mixed-mode embedded test, a large number of pseudorandom (PR) patterns are applied prior to deterministic test patterns. Partial Pseudo-Exhaustive Testing (P-PET) replaces these pseudo-random patterns during embedded testing by partial pseudo-exhaustive patterns to test a large portion of a circuit fault-model independently. The overall defect coverage is optimized compared to random testing or deterministic tests using the stuck-at fault model while maintaining a comparable hardware overhead and the same test application time.
This work for the first time combines P-PET with a fault model independent diagnosis algorithm and shows that arbitrary defects can be diagnosed on average much more precisely than with standard embedded testing. The results are compared to random pattern testing and deterministic testing targeting stuck-at faults.},
  doi = {http://dx.doi.org/10.1109/ATS.2011.60},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2011/ATS_MumtaIHW2011.pdf}
}
180. A Hybrid Fault Tolerant Architecture for Robustness Improvement of Digital Circuits
Tran, D.A., Virazel, A., Bosio, A., Dilillo, L., Girard, P., Pravossoudovitch, S. and Wunderlich, H.-J.
Proceedings of the 20th IEEE Asian Test Symposium (ATS'11), New Delhi, India, 20-23 November 2011
2011
DOI PDF 
Keywords: transient error; permanent error; robustness; fault tolerance; TMR; power consumption; aging phenomenon
Abstract: In this paper, a novel hybrid fault tolerant architecture for digital circuits is proposed in order to enable the use of future CMOS technology nodes. This architecture targets robustness, power consumption and yield at the same time, at area costs comparable to standard fault tolerance schemes. The architecture increases circuit robustness by tolerating both transient and permanent online faults. It consumes less power than the classical Triple Modular Redundancy (TMR) approach while utilizing comparable silicon area. It overcomes many permanent faults occurring throughout manufacturing while still tolerating soft errors introduced by particle strikes. This is achieved by using scalable redundancy resources while keeping the hardened combinational logic circuits intact. The technique combines different types of redundancy: information redundancy for error detection, temporal redundancy for soft error correction and hardware redundancy for hard error tolerance. Results on the largest ISCAS and ITC benchmark circuits show that our approach has a negligible area cost of about 2% to 3% with a power consumption saving of about 30% compared to TMR. Finally, it mitigates aging phenomena and thus increases the expected lifetime of logic circuits.
BibTeX:
@inproceedings{TranVBDGPW2011,
  author = {Tran, Duc Anh and Virazel, Arnaud and Bosio, Alberto and Dilillo, Luigi and Girard, Patrick and Pravossoudovitch, Serge and Wunderlich, Hans-Joachim},
  title = {{A Hybrid Fault Tolerant Architecture for Robustness Improvement of Digital Circuits}},
  booktitle = {Proceedings of the 20th IEEE Asian Test Symposium (ATS'11)},
  publisher = {IEEE Computer Society},
  year = {2011},
  keywords = {transient error; permanent error; robustness; fault tolerance; TMR; power consumption; aging phenomenon},
  abstract = {In this paper, a novel hybrid fault tolerant architecture for digital circuits is proposed in order to enable the use of future CMOS technology nodes. This architecture targets robustness, power consumption and yield at the same time, at area costs comparable to standard fault tolerance schemes. The architecture increases circuit robustness by tolerating both transient and permanent online faults. It consumes less power than the classical Triple Modular Redundancy (TMR) approach while utilizing comparable silicon area. It overcomes many permanent faults occurring throughout manufacturing while still tolerating soft errors introduced by particle strikes. This is achieved by using scalable redundancy resources while keeping the hardened combinational logic circuits intact. The technique combines different types of redundancy: information redundancy for error detection, temporal redundancy for soft error correction and hardware redundancy for hard error tolerance. Results on the largest ISCAS and ITC benchmark circuits show that our approach has a negligible area cost of about 2% to 3% with a power consumption saving of about 30% compared to TMR. Finally, it mitigates aging phenomena and thus increases the expected lifetime of logic circuits.},
  doi = {http://dx.doi.org/10.1109/ATS.2011.89},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2011/ATS_TranVBDGPW2011.pdf}
}
179. Design and Architectures for Dependable Embedded Systems
Henkel, J., Bauer, L., Becker, J., Bringmann, O., Brinkschulte, U., Chakraborty, S., Engel, M., Ernst, R., Härtig, H., Hedrich, L., Herkersdorf, A., Kapitza, R., Lohmann, D., Marwedel, P., Platzner, M., Rosenstiel, W., Schlichtmann, U., Spinczyk, O., Tahoori, M., Teich, J., Wehn, N. and Wunderlich, H.-J.
Proceedings of the 9th IEEE/ACM/IFIP international conference on Hardware/software codesign and system synthesis (CODES+ISSS'11), Taipei, Taiwan, 9-14 October 2011, pp. 69-78
2011
DOI URL PDF 
Keywords: Resilience; Fault-Tolerance; Embedded Systems; MPSoCs; Dependability
Abstract: The paper presents an overview of a major research project on dependable embedded systems that started in Fall 2010 and is running for a projected duration of six years. The aim is a 'dependability co-design' that spans various levels of abstraction in the design process of embedded systems, from gate level through operating system and application software to system architecture. In addition, we present a new classification of faults, errors, and failures.
BibTeX:
@inproceedings{HenkeBBBBCEEHHHKLMPRSSTTWW2011,
  author = {Henkel, Jörg and Bauer, Lars and Becker, Joachim and Bringmann, Oliver and Brinkschulte, Uwe and Chakraborty, Samarjit and Engel, Michael and Ernst, Rolf and Härtig, Hermann and Hedrich, Lars and Herkersdorf, Andreas and Kapitza, Rüdiger and Lohmann, Daniel and Marwedel, Peter and Platzner, Marco and Rosenstiel, Wolfgang and Schlichtmann, Ulf and Spinczyk, Olaf and Tahoori, Mehdi and Teich, Jürgen and Wehn, Norbert and Wunderlich, Hans-Joachim},
  title = {{Design and Architectures for Dependable Embedded Systems}},
  booktitle = {Proceedings of the 9th IEEE/ACM/IFIP international conference on Hardware/software codesign and system synthesis (CODES+ISSS'11)},
  publisher = {ACM},
  year = {2011},
  pages = {69--78},
  keywords = {Resilience; Fault-Tolerance; Embedded Systems; MPSoCs; Dependability},
  abstract = {The paper presents an overview of a major research project on dependable embedded systems that has started in Fall 2010 and is running for a projected duration of six years. Aim is a 'dependability co-design' that spans various levels of abstraction in the design process of embedded systems starting from gate level through operating system, applications software to system architecture. In addition, we present a new classification on faults, errors, and failures.},
  url = {http://ieeexplore.ieee.org/xpl/freeabs_all.jsp?arnumber=6062320},
  doi = {http://dx.doi.org/10.1145/2039370.2039384},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2011/CODES+ISSS_HenkeBBBBCEEHHHKLMPRSSTTWW2011.pdf}
}
178. Robuster Selbsttest mit Diagnose
Cook, A., Hellebrand, S., Indlekofer, T. and Wunderlich, H.-J.
5. GMM/GI/ITG-Fachtagung Zuverlässigkeit und Entwurf (ZuE'11)
Vol. 231, Hamburg-Harburg, Germany, 27-29 September 2011, pp. 48-53
2011
URL PDF 
Abstract: Robuste Schaltungen können bestimmte Fehler tolerieren, stellen aber auch besonders hohe Anforderungen an Test und Diagnose. Um Ausbeuteverluste zu vermeiden, muss der Test kritische Fehler von unkritischen Fehlern unterscheiden, die sich während des Systembetriebs nicht auswirken. Zur Verbesserung des Produktionsprozesses muss außerdem eine effiziente Diagnose für erkannte kritische Fehler unterstützt werden. Bisherige Arbeiten für Schaltungen mit Zeitredundanz haben gezeigt, dass ein Selbsttest mit Rücksetzpunkten kostengünstig kritische permanente Fehler von unkritischen transienten Fehlern unterscheiden kann. Hier wird der Selbsttest in N Sitzungen unterteilt, die bei einem Fehler sofort wiederholt werden. Tritt beim zweiten Durchlauf einer Sitzung kein Fehler mehr auf, geht man von einem transienten Fehler aus. Dabei genügt es, die Referenzantworten für die einzelnen Sitzungen in stark kompaktierter Form abzulegen. Statt einer vollständigen Signatur wird nur eine kurze Bitfolge gespeichert, welche die Signaturberechnung über mehrere Zeitpunkte hinweg charakterisiert. Die vorliegende Arbeit erweitert das Testen mit Rücksetzpunkten zu einem integrierten Test- und Diagnoseprozess. Es wird gezeigt, dass ein einziger Testdurchlauf mit stark kompaktierten Referenzwerten genügt, um eine vergleichbare diagnostische Auflösung zu erreichen wie bei einem Test ohne Antwortkompaktierung.

Robust circuits can tolerate certain faults, but they also place particularly high demands on test and diagnosis. To avoid yield loss, the test must distinguish critical faults from uncritical faults that have no effect during system operation. To improve the production process, efficient diagnosis of the detected critical faults must also be supported. Previous work on circuits with time redundancy has shown that a self-test with rollback points can distinguish critical permanent faults from uncritical transient faults at low cost. Here, the self-test is partitioned into N sessions, which are repeated immediately when an error is observed. If no error occurs in the second run of a session, a transient fault is assumed. It suffices to store the reference responses of the individual sessions in highly compacted form: instead of a complete signature, only a short bit sequence is stored which characterizes the signature computation over several points in time. This work extends testing with rollback points to an integrated test and diagnosis process. It is shown that a single test run with highly compacted reference values is sufficient to achieve a diagnostic resolution comparable to that of a test without response compaction.
BibTeX:
@inproceedings{CookHIW2011a,
  author = {Cook, Alejandro and Hellebrand, Sybille and Indlekofer, Thomas and Wunderlich, Hans-Joachim},
  title = {{Robuster Selbsttest mit Diagnose}},
  booktitle = {5. GMM/GI/ITG-Fachtagung Zuverlässigkeit und Entwurf (ZuE'11)},
  publisher = {VDE VERLAG GMBH},
  year = {2011},
  volume = {231},
  pages = {48--53},
  abstract = {Robuste Schaltungen können bestimmte Fehler tolerieren, stellen aber auch besonders hohe Anforderungen an Test und Diagnose. Um Ausbeuteverluste zu vermeiden, muss der Test kritische Fehler von unkritischen Fehlern unterscheiden, die sich während des Systembetriebs nicht auswirken. Zur Verbesserung des Produktionsprozesses muss außerdem eine effiziente Diagnose für erkannte kritische Fehler unterstützt werden. Bisherige Arbeiten für Schaltungen mit Zeitredundanz haben gezeigt, dass ein Selbsttest mit Rücksetzpunkten kostengünstig kritische permanente Fehler von unkritischen transienten Fehlern unterscheiden kann. Hier wird der Selbsttest in N Sitzungen unterteilt, die bei einem Fehler sofort wiederholt werden. Tritt beim zweiten Durchlauf einer Sitzung kein Fehler mehr auf, geht man von einem transienten Fehler aus. Dabei genügt es, die Referenzantworten für die einzelnen Sitzungen in stark kompaktierter Form abzulegen. Statt einer vollständigen Signatur wird nur eine kurze Bitfolge gespeichert, welche die Signaturberechnung über mehrere Zeitpunkte hinweg charakterisiert. Die vorliegende Arbeit erweitert das Testen mit Rücksetzpunkten zu einem integrierten Test- und Diagnoseprozess. Es wird gezeigt, dass ein einziger Testdurchlauf mit stark kompaktierten Referenzwerten genügt, um eine vergleichbare diagnostische Auflösung zu erreichen wie bei einem Test ohne Antwortkompaktierung.

Robust circuits can tolerate certain faults, but they also place particularly high demands on test and diagnosis. To avoid yield loss, the test must distinguish critical faults from uncritical faults that have no effect during system operation. To improve the production process, efficient diagnosis of the detected critical faults must also be supported. Previous work on circuits with time redundancy has shown that a self-test with rollback points can distinguish critical permanent faults from uncritical transient faults at low cost. Here, the self-test is partitioned into N sessions, which are repeated immediately when an error is observed. If no error occurs in the second run of a session, a transient fault is assumed. It suffices to store the reference responses of the individual sessions in highly compacted form: instead of a complete signature, only a short bit sequence is stored which characterizes the signature computation over several points in time. This work extends testing with rollback points to an integrated test and diagnosis process. It is shown that a single test run with highly compacted reference values is sufficient to achieve a diagnostic resolution comparable to that of a test without response compaction.},
  url = {http://www.vde-verlag.de/proceedings-en/453357011.html},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2011/ZUE_CookHIW2011.pdf}
}
177. Korrektur transienter Fehler in eingebetteten Speicherelementen
Imhof, M.E. and Wunderlich, H.-J.
5. GMM/GI/ITG-Fachtagung Zuverlässigkeit und Entwurf (ZuE'11)
Vol. 231, Hamburg-Harburg, Germany, 27-29 September 2011, pp. 76-83
2011
URL PDF 
Keywords: Transiente Fehler; Soft Error; Single Event Upset (SEU); Erkennung; Lokalisierung; Korrektur; Latch; Register; Single Event Effect; Detection; Localization; Correction
Abstract: In der vorliegenden Arbeit wird ein Schema zur Korrektur von transienten Fehlern in eingebetteten, pegelgesteuerten Speicherelementen vorgestellt. Das Schema verwendet Struktur- und Informationsredundanz, um Single Event Upsets (SEUs) in Registern zu erkennen und zu korrigieren. Mit geringem Mehraufwand kann ein betroffenes Bit lokalisiert und mit einem hier vorgestellten Bit-Flipping-Latch (BFL) rückgesetzt werden, so dass die Zahl zusätzlicher Taktzyklen im Fehlerfall minimiert wird. Ein Vergleich mit anderen Erkennungs- und Korrekturschemata zeigt einen deutlich reduzierten Hardwaremehraufwand.

In this paper a soft error correction scheme for embedded level sensitive storage elements is presented. The scheme employs structural- and information-redundancy to detect and correct Single Event Upsets (SEUs) in registers. With low additional hardware overhead the affected bit can be localized and reset with the presented Bit-Flipping-Latch (BFL), thereby minimizing the amount of additional clock cycles in the faulty case. A comparison with other detection and correction schemes shows a significantly lower hardware overhead.

BibTeX:
@inproceedings{ImhofW2011,
  author = {Imhof, Michael E. and Wunderlich, Hans-Joachim},
  title = {{Korrektur transienter Fehler in eingebetteten Speicherelementen}},
  booktitle = {5. GMM/GI/ITG-Fachtagung Zuverlässigkeit und Entwurf (ZuE'11)},
  publisher = {VDE VERLAG GMBH},
  year = {2011},
  volume = {231},
  pages = {76--83},
  keywords = {Transiente Fehler; Soft Error; Single Event Upset (SEU); Erkennung; Lokalisierung; Korrektur; Latch; Register; Single Event Effect; Detection; Localization; Correction},
  abstract = {In der vorliegenden Arbeit wird ein Schema zur Korrektur von transienten Fehlern in eingebetteten, pegelgesteuerten Speicherelementen vorgestellt. Das Schema verwendet Struktur- und Informationsredundanz, um Single Event Upsets (SEUs) in Registern zu erkennen und zu korrigieren. Mit geringem Mehraufwand kann ein betroffenes Bit lokalisiert und mit einem hier vorgestellten Bit-Flipping-Latch (BFL) rückgesetzt werden, so dass die Zahl zusätzlicher Taktzyklen im Fehlerfall minimiert wird. Ein Vergleich mit anderen Erkennungs- und Korrekturschemata zeigt einen deutlich reduzierten Hardwaremehraufwand.

In this paper a soft error correction scheme for embedded level sensitive storage elements is presented. The scheme employs structural- and information-redundancy to detect and correct Single Event Upsets (SEUs) in registers. With low additional hardware overhead the affected bit can be localized and reset with the presented Bit-Flipping-Latch (BFL), thereby minimizing the amount of additional clock cycles in the faulty case. A comparison with other detection and correction schemes shows a significantly lower hardware overhead.},
  url = {http://www.vde-verlag.de/proceedings-de/453357010.html},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2011/ZuE_ImhofW2011.pdf}
}
176. Eingebetteter Test zur hochgenauen Defekt-Lokalisierung
Mumtaz, A., Imhof, M.E., Holst, S. and Wunderlich, H.-J.
5. GMM/GI/ITG-Fachtagung Zuverlässigkeit und Entwurf (ZuE'11)
Vol. 231, Hamburg-Harburg, Germany, 27-29 September 2011, pp. 43-47
2011
URL PDF 
Keywords: Eingebetteter Selbsttest; Pseudoerschöpfender Test; Diagnose; Debug; BIST; Pseudo-Exhaustive Testing; Diagnosis; Debug
Abstract: Moderne Diagnosealgorithmen können aus den vorhandenen Fehlerdaten direkt die defekte Schaltungsstruktur identifizieren, ohne sich auf spezialisierte Fehlermodelle zu beschränken. Solche Algorithmen benötigen jedoch Testmuster mit einer hohen Defekterfassung. Dies ist insbesondere im eingebetteten Test eine große Herausforderung. Der Partielle Pseudo-Erschöpfende Test (P-PET) ist eine Methode, um die Defekterfassung im Vergleich zu einem Zufallstest oder einem deterministischen Test für das Haftfehlermodell zu erhöhen. Wird die im eingebetteten Test übliche Phase der vorgeschalteten Erzeugung von Pseudozufallsmustern durch die Erzeugung partieller pseudo-erschöpfender Muster ersetzt, kann bei vergleichbarem Hardware-Aufwand und gleicher Testzeit eine optimale Defekterfassung für den größten Schaltungsteil erreicht werden. Diese Arbeit kombiniert zum ersten Mal P-PET mit einem fehlermodell-unabhängigen Diagnosealgorithmus und zeigt, dass sich beliebige Defekte im Mittel wesentlich präziser diagnostizieren lassen als mit Zufallsmustern oder einem deterministischen Test für Haftfehler.

Modern diagnosis algorithms are able to identify the defective circuit structure directly from existing fail data without being limited to any specialized fault models. Such algorithms however require test patterns with a high defect coverage, posing a major challenge particularly for embedded testing.
In mixed-mode embedded test, a large amount of pseudo-random patterns are applied prior to deterministic test pattern. Partial Pseudo-Exhaustive Testing (P-PET) replaces these pseudo-random patterns during embedded testing by partial pseudo-exhaustive patterns to test a large portion of a circuit fault-model independently. The overall defect coverage is optimized compared to random testing or deterministic tests using the stuck-at fault model while maintaining a comparable hardware overhead and the same test application time.
This work for the first time combines P-PET with a fault model independent diagnosis algorithm and shows that arbitrary defects can be diagnosed on average much more precisely than with standard embedded testing. The results are compared to random pattern testing and deterministic testing targeting stuck-at faults.

BibTeX:
@inproceedings{MumtaIHW2011a,
  author = {Mumtaz, Abdullah and Imhof, Michael E. and Holst, Stefan and Wunderlich, Hans-Joachim},
  title = {{Eingebetteter Test zur hochgenauen Defekt-Lokalisierung}},
  booktitle = {5. GMM/GI/ITG-Fachtagung Zuverlässigkeit und Entwurf (ZuE'11)},
  publisher = {VDE VERLAG GMBH},
  year = {2011},
  volume = {231},
  pages = {43--47},
  keywords = {Eingebetteter Selbsttest; Pseudoerschöpfender Test; Diagnose; Debug; BIST; Pseudo-Exhaustive Testing; Diagnosis; Debug},
  abstract = {Moderne Diagnosealgorithmen können aus den vorhandenen Fehlerdaten direkt die defekte Schaltungsstruktur identifizieren, ohne sich auf spezialisierte Fehlermodelle zu beschränken. Solche Algorithmen benötigen jedoch Testmuster mit einer hohen Defekterfassung. Dies ist insbesondere im eingebetteten Test eine große Herausforderung. Der Partielle Pseudo-Erschöpfende Test (P-PET) ist eine Methode, um die Defekterfassung im Vergleich zu einem Zufallstest oder einem deterministischen Test für das Haftfehlermodell zu erhöhen. Wird die im eingebetteten Test übliche Phase der vorgeschalteten Erzeugung von Pseudozufallsmustern durch die Erzeugung partieller pseudo-erschöpfender Muster ersetzt, kann bei vergleichbarem Hardware-Aufwand und gleicher Testzeit eine optimale Defekterfassung für den größten Schaltungsteil erreicht werden. Diese Arbeit kombiniert zum ersten Mal P-PET mit einem fehlermodell-unabhängigen Diagnosealgorithmus und zeigt, dass sich beliebige Defekte im Mittel wesentlich präziser diagnostizieren lassen als mit Zufallsmustern oder einem deterministischen Test für Haftfehler.

Modern diagnosis algorithms are able to identify the defective circuit structure directly from existing fail data without being limited to any specialized fault models. Such algorithms however require test patterns with a high defect coverage, posing a major challenge particularly for embedded testing.
In mixed-mode embedded test, a large amount of pseudo-random patterns are applied prior to deterministic test pattern. Partial Pseudo-Exhaustive Testing (P-PET) replaces these pseudo-random patterns during embedded testing by partial pseudo-exhaustive patterns to test a large portion of a circuit fault-model independently. The overall defect coverage is optimized compared to random testing or deterministic tests using the stuck-at fault model while maintaining a comparable hardware overhead and the same test application time.
This work for the first time combines P-PET with a fault model independent diagnosis algorithm and shows that arbitrary defects can be diagnosed on average much more precisely than with standard embedded testing. The results are compared to random pattern testing and deterministic testing targeting stuck-at faults.},
  url = {http://www.vde-verlag.de/proceedings-de/453357010.html},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2011/ZuE_MumtaIHW2011a.pdf}
}
175. P-PET: Partial Pseudo-Exhaustive Test for High Defect Coverage
Mumtaz, A., Imhof, M.E. and Wunderlich, H.-J.
Proceedings of the IEEE International Test Conference (ITC'11), Anaheim, California, USA, 20-22 September 2011
2011
DOI PDF 
Keywords: BIST; Pseudo-Exhaustive Testing; Defect Coverage; N-Detect
Abstract: Pattern generation for embedded testing often consists of a phase generating random patterns and a second phase where deterministic patterns are applied. This paper presents a method which optimizes the first phase significantly and increases the defect coverage, while reducing the number of deterministic patterns required in the second phase.
The method is based on the concept of pseudo-exhaustive testing (PET), which was proposed as a method for fault model independent testing with high defect coverage. As its test length can grow exponentially with the circuit size, an application to larger circuits is usually impractical.
In this paper, partial pseudo-exhaustive testing (P-PET) is presented as a synthesis technique for multiple polynomial feedback shift registers. It scales with actual technology and is comparable with the usual pseudo-random (PR) pattern testing regarding test costs and test application time. The advantages with respect to the defect coverage, N-detectability for stuck-at faults and the reduction of deterministic test lengths are shown using state-of-the-art industrial circuits.
BibTeX:
@inproceedings{MumtaIW2011,
  author = {Mumtaz, Abdullah and Imhof, Michael E. and Wunderlich, Hans-Joachim},
  title = {{P-PET: Partial Pseudo-Exhaustive Test for High Defect Coverage}},
  booktitle = {Proceedings of the IEEE International Test Conference (ITC'11)},
  publisher = {IEEE Computer Society},
  year = {2011},
  keywords = {BIST; Pseudo-Exhaustive Testing; Defect Coverage; N-Detect},
  abstract = {Pattern generation for embedded testing often consists of a phase generating random patterns and a second phase where deterministic patterns are applied. This paper presents a method which optimizes the first phase significantly and increases the defect coverage, while reducing the number of deterministic patterns required in the second phase.
The method is based on the concept of pseudo-exhaustive testing (PET), which was proposed as a method for fault model independent testing with high defect coverage. As its test length can grow exponentially with the circuit size, an application to larger circuits is usually impractical.
In this paper, partial pseudo-exhaustive testing (P-PET) is presented as a synthesis technique for multiple polynomial feedback shift registers. It scales with actual technology and is comparable with the usual pseudo-random (PR) pattern testing regarding test costs and test application time. The advantages with respect to the defect coverage, N-detectability for stuck-at faults and the reduction of deterministic test lengths are shown using state-of-the-art industrial circuits.},
  doi = {http://dx.doi.org/10.1109/TEST.2011.6139130},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2011/ITC_MumtazIW2011.pdf}
}
174. A Novel Scan Segmentation Design Method for Avoiding Shift Timing Failures in Scan Testing
Yamato, Y., Wen, X., Kochte, M.A., Miyase, K., Kajihara, S. and Wang, L.-T.
Proceedings of the IEEE International Test Conference (ITC'11), Anaheim, California, USA, 20-22 September 2011
2011
DOI PDF 
Keywords: scan testing; shift power reduction; scan segmentation; switching activity; clock tree; clock skew
Abstract: High power consumption in scan testing can cause undue yield loss which has increasingly become a serious problem for deep-submicron VLSI circuits. Growing evidence attributes this problem to shift timing failures, which are primarily caused by excessive switching activity in the proximities of clock paths that tends to introduce severe clock skew due to IR-drop-induced delay increase. This paper is the first of its kind to address this critical issue with a novel layout-aware scheme based on scan segmentation design, called LCTI-SS (Low-Clock-Tree-Impact Scan Segmentation). An optimal combination of scan segments is identified for simultaneous clocking so that the switching activity in the proximities of clock trees is reduced while maintaining the average power reduction effect on conventional scan segmentation. Experimental results on benchmark and industrial circuits have demonstrated the advantage of the LCTI-SS scheme.
BibTeX:
@inproceedings{YamatWKMKW2011,
  author = {Yamato, Yuta and Wen, Xiaoqing and Kochte, Michael A. and Miyase, Kohei and Kajihara, Seiji and Wang, Laung-Terng},
  title = {{A Novel Scan Segmentation Design Method for Avoiding Shift Timing Failures in Scan Testing}},
  booktitle = {Proceedings of the IEEE International Test Conference (ITC'11)},
  publisher = {IEEE Computer Society},
  year = {2011},
  keywords = {scan testing; shift power reduction; scan segmentation; switching activity; clock tree; clock skew},
  abstract = {High power consumption in scan testing can cause undue yield loss which has increasingly become a serious problem for deep-submicron VLSI circuits. Growing evidence attributes this problem to shift timing failures, which are primarily caused by excessive switching activity in the proximities of clock paths that tends to introduce severe clock skew due to IR-drop-induced delay increase. This paper is the first of its kind to address this critical issue with a novel layout-aware scheme based on scan segmentation design, called LCTI-SS (Low-Clock-Tree-Impact Scan Segmentation). An optimal combination of scan segments is identified for simultaneous clocking so that the switching activity in the proximities of clock trees is reduced while maintaining the average power reduction effect on conventional scan segmentation. Experimental results on benchmark and industrial circuits have demonstrated the advantage of the LCTI-SS scheme.},
  doi = {http://dx.doi.org/10.1109/TEST.2011.6139162},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2011/ITC_YamatWKMKW2011.pdf}
}
173. Efficient Multi-level Fault Simulation of HW/SW Systems for Structural Faults
Baranowski, R., Di Carlo, S., Hatami, N., Imhof, M.E., Kochte, M.A., Prinetto, P., Wunderlich, H.-J. and Zoellin, C.G.
SCIENCE CHINA Information Sciences
Vol. 54(9), September 2011, pp. 1784-1796
2011
DOI PDF 
Keywords: fault simulation; multi-level; transaction-level modeling
Abstract: In recent technology nodes, reliability is increasingly considered a part of the standard design flow to be taken into account at all levels of embedded systems design. While traditional fault simulation techniques based on low-level models at gate- and register transfer-level offer high accuracy, they are too inefficient to properly cope with the complexity of modern embedded systems. Moreover, they do not allow for early exploration of design alternatives when a detailed model of the whole system is not yet available, which is highly required to increase the efficiency and quality of the design flow. Multi-level models that combine the simulation efficiency of high abstraction models with the accuracy of low-level models are therefore essential to efficiently evaluate the impact of physical defects on the system. This paper proposes a methodology to efficiently implement concurrent multi-level fault simulation across gate- and transaction-level models in an integrated simulation environment. It leverages state-of-the-art techniques for efficient fault simulation of structural faults together with transaction-level modeling. This combination of different models allows to accurately evaluate the impact of faults on the entire hardware/software system while keeping the computational effort low. Moreover, since only selected portions of the system require low-level models, early exploration of different design alternatives is efficiently supported. Experimental results obtained from three case studies are presented to demonstrate the high accuracy of the proposed method when compared with a standard gate/RT mixed-level approach and the strong improvement of simulation time which is reduced by four orders of magnitude on average.
BibTeX:
@article{BaranDHIKPWZ2011,
  author = {Baranowski, Rafal and Di Carlo, Stefano and Hatami, Nadereh and Imhof, Michael E. and Kochte, Michael A. and Prinetto, Paolo and Wunderlich, Hans-Joachim and Zoellin, Christian G.},
  title = {{Efficient Multi-level Fault Simulation of HW/SW Systems for Structural Faults}},
  journal = {SCIENCE CHINA Information Sciences},
  publisher = {Science China Press, co-published with Springer-Verlag},
  year = {2011},
  volume = {54},
  number = {9},
  pages = {1784--1796},
  keywords = {fault simulation; multi-level; transaction-level modeling},
  abstract = {In recent technology nodes, reliability is increasingly considered a part of the standard design flow to be taken into account at all levels of embedded systems design. While traditional fault simulation techniques based on low-level models at gate- and register transfer-level offer high accuracy, they are too inefficient to properly cope with the complexity of modern embedded systems. Moreover, they do not allow for early exploration of design alternatives when a detailed model of the whole system is not yet available, which is highly required to increase the efficiency and quality of the design flow. Multi-level models that combine the simulation efficiency of high abstraction models with the accuracy of low-level models are therefore essential to efficiently evaluate the impact of physical defects on the system. This paper proposes a methodology to efficiently implement concurrent multi-level fault simulation across gate- and transaction-level models in an integrated simulation environment. It leverages state-of-the-art techniques for efficient fault simulation of structural faults together with transaction-level modeling. This combination of different models allows to accurately evaluate the impact of faults on the entire hardware/software system while keeping the computational effort low. Moreover, since only selected portions of the system require low-level models, early exploration of different design alternatives is efficiently supported. Experimental results obtained from three case studies are presented to demonstrate the high accuracy of the proposed method when compared with a standard gate/RT mixed-level approach and the strong improvement of simulation time which is reduced by four orders of magnitude on average.},
  doi = {http://dx.doi.org/10.1007/s11432-011-4366-9},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2011/SCIS_BaranDHIKPWZ2011.pdf}
}
172. Variation-Aware Fault Modeling
Hopsch, F., Becker, B., Hellebrand, S., Polian, I., Straube, B., Vermeiren, W. and Wunderlich, H.-J.
SCIENCE CHINA Information Sciences
Vol. 54(9), September 2011, pp. 1813-1826
2011
DOI PDF 
Keywords: process variations; test methods; statistical test; histogram data base
Abstract: To achieve a high product quality for nano-scale systems, both realistic defect mechanisms and process variations must be taken into account. While existing approaches for variation-aware digital testing either restrict themselves to special classes of defects or assume given probability distributions to model variabilities, the proposed approach combines defect-oriented testing with statistical library characterization. It uses Monte Carlo simulations at electrical level to extract delay distributions of cells in the presence of defects and for the defect-free case. This allows distinguishing the effects of process variations on the cell delay from defect-induced cell delays under process variations. To provide a suitable interface for test algorithms at higher levels of abstraction, the distributions are represented as histograms and stored in a histogram data base (HDB). Thus, the computationally expensive defect analysis needs to be performed only once as a preprocessing step for library characterization, and statistical test algorithms do not require any low level information beyond the HDB. The generation of the HDB is demonstrated for primitive cells in 45 nm technology.
BibTeX:
@article{HopscBHPSVW2011,
  author = {Hopsch, Fabian and Becker, Bernd and Hellebrand, Sybille and Polian, Ilia and Straube, Bernd and Vermeiren, Wolfgang and Wunderlich, Hans-Joachim},
  title = {{Variation-Aware Fault Modeling}},
  journal = {SCIENCE CHINA Information Sciences},
  publisher = {Science China Press, co-published with Springer-Verlag},
  year = {2011},
  volume = {54},
  number = {9},
  pages = {1813--1826},
  keywords = {process variations; test methods; statistical test; histogram data base},
  abstract = {To achieve a high product quality for nano-scale systems, both realistic defect mechanisms and process variations must be taken into account. While existing approaches for variation-aware digital testing either restrict themselves to special classes of defects or assume given probability distributions to model variabilities, the proposed approach combines defect-oriented testing with statistical library characterization. It uses Monte Carlo simulations at electrical level to extract delay distributions of cells in the presence of defects and for the defect-free case. This allows distinguishing the effects of process variations on the cell delay from defect-induced cell delays under process variations. To provide a suitable interface for test algorithms at higher levels of abstraction, the distributions are represented as histograms and stored in a histogram data base (HDB). Thus, the computationally expensive defect analysis needs to be performed only once as a preprocessing step for library characterization, and statistical test algorithms do not require any low level information beyond the HDB. The generation of the HDB is demonstrated for primitive cells in 45 nm technology.},
  doi = {http://dx.doi.org/10.1007/s11432-011-4367-8},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2011/SCIS_HopscBHPSVW2011.pdf}
}
171. SAT-based Capture-Power Reduction for At-Speed Broadcast-Scan-Based Test Compression Architectures
Kochte, M.A., Miyase, K., Wen, X., Kajihara, S., Yamato, Y., Enokimoto, K. and Wunderlich, H.-J.
Proceedings of the 17th IEEE/ACM International Symposium on Low Power Electronics and Design (ISLPED'11), Fukuoka, Japan, 1-3 August 2011, pp. 33-38
2011
DOI URL PDF 
Keywords: Low capture-power test; X-filling; ATPG
Abstract: Excessive power dissipation during VLSI testing results in over-testing, yield loss and heat damage of the device. For low power devices with advanced power management features and more stringent power budgets, power-aware testing is even more mandatory. Effective and efficient test set postprocessing techniques based on X-identification and power-aware X-filling have been proposed for external and embedded deterministic test. This work proposes a novel X-filling algorithm for combinational and broadcast-scan-based test compression schemes which have great practical significance. The algorithm ensures compressibility of test cubes using a SAT-based check. Compared to methods based on topological justification, the solution space of the compressed test vector is not pruned early during the search. Thus, this method allows much more precise low-power X-filling of test vectors. Experiments on benchmark and industrial circuits show the applicability to capture-power reduction during scan testing.
BibTeX:
@inproceedings{KochtMWKYEW2011,
  author = {Kochte, Michael A. and Miyase, Kohei and Wen, Xiaoqing and Kajihara, Seiji and Yamato, Yuta and Enokimoto, Kazunari and Wunderlich, Hans-Joachim},
  title = {{SAT-based Capture-Power Reduction for At-Speed Broadcast-Scan-Based Test Compression Architectures}},
  booktitle = {Proceedings of the 17th IEEE/ACM International Symposium on Low Power Electronics and Design (ISLPED'11)},
  publisher = {IEEE Computer Society},
  year = {2011},
  pages = {33--38},
  keywords = {Low capture-power test; X-filling; ATPG},
  abstract = {Excessive power dissipation during VLSI testing results in over-testing, yield loss and heat damage of the device. For low power devices with advanced power management features and more stringent power budgets, power-aware testing is even more mandatory. Effective and efficient test set postprocessing techniques based on X-identification and power-aware X-filling have been proposed for external and embedded deterministic test. This work proposes a novel X-filling algorithm for combinational and broadcast-scan-based test compression schemes which have great practical significance. The algorithm ensures compressibility of test cubes using a SAT-based check. Compared to methods based on topological justification, the solution space of the compressed test vector is not pruned early during the search. Thus, this method allows much more precise low-power X-filling of test vectors. Experiments on benchmark and industrial circuits show the applicability to capture-power reduction during scan testing.},
  url = {http://dl.acm.org/citation.cfm?id=2016802.2016812},
  doi = {http://dx.doi.org/10.1109/ISLPED.2011.5993600},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2011/ISLPED_KochtMWKYEW2011.pdf}
}
170. Fail-Safety in Core-Based System Design
Baranowski, R. and Wunderlich, H.-J.
Proceedings of the 17th IEEE International On-Line Testing Symposium (IOLTS'11), Athens, Greece, 13-15 July 2011, pp. 278-283
2011
DOI PDF 
Keywords: fail-safe design; core-based design; IP reuse methodology
Abstract: As scaling of nanoelectronics may deteriorate dependability, fail-safe design techniques gain attention. We apply the concept of fail-safety to IP core-based system design, making the first step towards dependability-aware reuse methodologies. We introduce a methodology for dependability characterization, which uses informal techniques to identify hazards and employs formal methods to check if the hazards occur. The proposed hazard metrics provide qualitative and quantitative insight into possible core misbehavior. Experimental results on two IP cores show that the approach enables early comparative dependability studies.
BibTeX:
@inproceedings{BaranW2011,
  author = {Baranowski, Rafal and Wunderlich, Hans-Joachim},
  title = {{Fail-Safety in Core-Based System Design}},
  booktitle = {Proceedings of the 17th IEEE International On-Line Testing Symposium (IOLTS'11)},
  publisher = {IEEE Computer Society},
  year = {2011},
  pages = {278--283},
  keywords = {fail-safe design; core-based design; IP reuse methodology},
  abstract = {As scaling of nanoelectronics may deteriorate dependability, fail-safe design techniques gain attention. We apply the concept of fail-safety to IP core-based system design, making the first step towards dependability-aware reuse methodologies. We introduce a methodology for dependability characterization, which uses informal techniques to identify hazards and employs formal methods to check if the hazards occur. The proposed hazard metrics provide qualitative and quantitative insight into possible core misbehavior. Experimental results on two IP cores show that the approach enables early comparative dependability studies.},
  doi = {http://dx.doi.org/10.1109/IOLTS.2011.5994542},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2011/IOLTS_BaranW2011.pdf}
}
169. Soft Error Correction in Embedded Storage Elements
Imhof, M.E. and Wunderlich, H.-J.
Proceedings of the 17th IEEE International On-Line Testing Symposium (IOLTS'11), Athens, Greece, 13-15 July 2011, pp. 169-174
2011
DOI PDF 
Keywords: Single Event Effect; Correction; Latch; Register
Abstract: In this paper a soft error correction scheme for embedded storage elements in level sensitive designs is presented. It employs space redundancy to detect and locate Single Event Upsets (SEUs). It is able to detect SEUs in registers and employ architectural replay to perform correction with low additional hardware overhead. Together with the proposed bit flipping latch an online correction can be implemented on bit level with a minimal loss of clock cycles. A comparison with other detection and correction schemes shows a significantly lower hardware overhead.
BibTeX:
@inproceedings{ImhofW2011a,
  author = {Imhof, Michael E. and Wunderlich, Hans-Joachim},
  title = {{Soft Error Correction in Embedded Storage Elements}},
  booktitle = {Proceedings of the 17th IEEE International On-Line Testing Symposium (IOLTS'11)},
  publisher = {IEEE Computer Society},
  year = {2011},
  pages = {169--174},
  keywords = {Single Event Effect; Correction; Latch; Register},
  abstract = {In this paper a soft error correction scheme for embedded storage elements in level sensitive designs is presented. It employs space redundancy to detect and locate Single Event Upsets (SEUs). It is able to detect SEUs in registers and employ architectural replay to perform correction with low additional hardware overhead. Together with the proposed bit flipping latch an online correction can be implemented on bit level with a minimal loss of clock cycles. A comparison with other detection and correction schemes shows a significantly lower hardware overhead.},
  doi = {http://dx.doi.org/10.1109/IOLTS.2011.5993832},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2011/IOLTS_ImhofW2011.pdf}
}
168. Structural In-Field Diagnosis for Random Logic Circuits
Cook, A., Elm, M., Wunderlich, H.-J. and Abelein, U.
Proceedings of the 16th IEEE European Test Symposium (ETS'11), Trondheim, Norway, 23-27 May 2011, pp. 111-116
2011
DOI URL PDF 
Keywords: In-field diagnosis; Built-In Self-Diagnosis
Abstract: In-field diagnosability of electronic components in larger systems such as automobiles becomes a necessity for both customers and system integrators. Traditionally, functional diagnosis is applied during integration and in workshops for in-field failures or break-downs. However, functional diagnosis does not yield sufficient coverage to allow for short repair times and fast reaction on systematic failures in the production. Structural diagnosis could yield the desired coverage, yet recent built-in architectures which could be reused in the field either do not reveal diagnostic information or necessitate dedicated test schemes.
The paper at hand closes this gap with a new built-in test method for autonomous in-field testing and in-field diagnostic data collection. The proposed Built-In Self-Diagnosis method (BISD) is based on the standard BIST architecture and can seamlessly be integrated with recent, commercial DfT techniques. Experiments with industrial designs show that its overhead is marginal and its structural diagnostic capabilities are comparable to those of external diagnosis on high-end test equipment.
BibTeX:
@inproceedings{CookEWA2011,
  author = {Cook, Alejandro and Elm, Melanie and Wunderlich, Hans-Joachim and Abelein, Ulrich},
  title = {{Structural In-Field Diagnosis for Random Logic Circuits}},
  booktitle = {Proceedings of the 16th IEEE European Test Symposium (ETS'11)},
  publisher = {IEEE Computer Society},
  year = {2011},
  pages = {111--116},
  keywords = {In-field diagnosis; Built-In Self-Diagnosis},
  abstract = {In-field diagnosability of electronic components in larger systems such as automobiles becomes a necessity for both customers and system integrators. Traditionally, functional diagnosis is applied during integration and in workshops for in-field failures or break-downs. However, functional diagnosis does not yield sufficient coverage to allow for short repair times and fast reaction on systematic failures in the production. Structural diagnosis could yield the desired coverage, yet recent built-in architectures which could be reused in the field either do not reveal diagnostic information or necessitate dedicated test schemes.
The paper at hand closes this gap with a new built-in test method for autonomous in-field testing and in-field diagnostic data collection. The proposed Built-In Self-Diagnosis method (BISD) is based on the standard BIST architecture and can seamlessly be integrated with recent, commercial DfT techniques. Experiments with industrial designs show that its overhead is marginal and its structural diagnostic capabilities are comparable to those of external diagnosis on high-end test equipment.},
  url = {http://www.computer.org/csdl/proceedings/ets/2011/4433/00/4433a111-abs.html},
  doi = {http://dx.doi.org/10.1109/ETS.2011.25},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2011/ETS_CookEWA2011.pdf}
}
167. Structural Test for Graceful Degradation of NoC Switches
Dalirsani, A., Holst, S., Elm, M. and Wunderlich, H.-J.
Proceedings of the 16th IEEE European Test Symposium (ETS'11), Trondheim, Norway, 23-27 May 2011, pp. 183-188
2011
DOI URL PDF 
Keywords: Network-on-Chip; Graceful Degradation; Performability; Logic Diagnosis
Abstract: Networks-on-Chip (NoCs) are implicitly fault tolerant due to their inherent redundancy. They can overcome defective cores, links and switches. As a side effect, yield is increased at the cost of reduced performability. In this paper, a new diagnosis method based on the standard flow of industrial volume testing is presented, which is able to identify the intact functions rather than providing only a pass/fail result for the complete switch.
The new method combines for the first time the precision of structural testing with information on the functional behavior in the presence of defects to determine the unaffected switch functions and use partially defective NoC switches. According to the experimental results, this improves the performability of NoCs as more than 61% of defects only impair one switch port. Unlike previous methods for implementing fault tolerant switches, the developed technique does not impose any additional area overhead and is compatible with any switch design.
BibTeX:
@inproceedings{DalirHEW2011,
  author = {Dalirsani, Atefe and Holst, Stefan and Elm, Melanie and Wunderlich, Hans-Joachim},
  title = {{Structural Test for Graceful Degradation of NoC Switches}},
  booktitle = {Proceedings of the 16th IEEE European Test Symposium (ETS'11)},
  publisher = {IEEE Computer Society},
  year = {2011},
  pages = {183--188},
  keywords = {Network-on-Chip; Graceful Degradation; Performability; Logic Diagnosis},
  abstract = {Networks-on-Chip (NoCs) are implicitly fault tolerant due to their inherent redundancy. They can overcome defective cores, links and switches. As a side effect, yield is increased at the cost of reduced performability. In this paper, a new diagnosis method based on the standard flow of industrial volume testing is presented, which is able to identify the intact functions rather than providing only a pass/fail result for the complete switch.
The new method combines for the first time the precision of structural testing with information on the functional behavior in the presence of defects to determine the unaffected switch functions and use partially defective NoC switches. According to the experimental results, this improves the performability of NoCs as more than 61% of defects only impair one switch port. Unlike previous methods for implementing fault tolerant switches, the developed technique does not impose any additional area overhead and is compatible with any switch design.},
  url = {http://www.computer.org/csdl/proceedings/ets/2011/4433/00/4433a183-abs.html},
  doi = {http://dx.doi.org/10.1109/ETS.2011.33},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2011/ETS_DalirHEW2011.pdf}
}
166. Towards Variation-Aware Test Methods
Polian, I., Becker, B., Hellebrand, S., Wunderlich, H.-J. and Maxwell, P.
Proceedings of the 16th IEEE European Test Symposium (ETS'11), Trondheim, Norway, 23-27 May 2011, pp. 219-225
2011
DOI URL PDF 
Keywords: Parameter variations; Adaptive test; Delay test
Abstract: Nanoelectronic circuits are increasingly affected by massive statistical process variations, leading to a paradigm shift in both design and test area. In circuit and system design, a broad class of methods for robustness like statistical design and self calibration has emerged and is increasingly used by the industry. The test community's answer to the massive-variation challenge is currently adaptive test. The test stimuli are modified on the fly (during test application) based on the circuit responses observed. The collected circuit outputs undergo statistical post-processing to facilitate pass/fail classification. We will present fundamentals of adaptive and robust test techniques and their theoretical background. While adaptive test is effective, the understanding how it covers defects under different process parameter combinations is not fully established yet with respect to algorithmic foundations. For this reason, novel analytic and algorithmic approaches in the field of variation-aware testing will also be presented in the tutorial. Coverage of defects in the process parameter space is modeled and maximized by an interplay between special fault simulation and multi-constrained ATPG algorithms. These systematic approaches can complement adaptive test application schemes to form a closed-loop system that combines analytical data with measurement results for maximal test quality.
BibTeX:
@inproceedings{PoliaBHWM2011,
  author = {Polian, Ilia and Becker, Bernd and Hellebrand, Sybille and Wunderlich, Hans-Joachim and Maxwell, Peter},
  title = {{Towards Variation-Aware Test Methods}},
  booktitle = {Proceedings of the 16th IEEE European Test Symposium (ETS'11)},
  publisher = {IEEE Computer Society},
  year = {2011},
  pages = {219--225},
  keywords = {Parameter variations; Adaptive test; Delay test},
  abstract = {Nanoelectronic circuits are increasingly affected by massive statistical process variations, leading to a paradigm shift in both design and test area. In circuit and system design, a broad class of methods for robustness like statistical design and self calibration has emerged and is increasingly used by the industry. The test community's answer to the massive-variation challenge is currently adaptive test. The test stimuli are modified on the fly (during test application) based on the circuit responses observed. The collected circuit outputs undergo statistical post-processing to facilitate pass/fail classification. We will present fundamentals of adaptive and robust test techniques and their theoretical background. While adaptive test is effective, the understanding how it covers defects under different process parameter combinations is not fully established yet with respect to algorithmic foundations. For this reason, novel analytic and algorithmic approaches in the field of variation-aware testing will also be presented in the tutorial. Coverage of defects in the process parameter space is modeled and maximized by an interplay between special fault simulation and multi-constrained ATPG algorithms. These systematic approaches can complement adaptive test application schemes to form a closed-loop system that combines analytical data with measurement results for maximal test quality.},
  url = {http://www.computer.org/csdl/proceedings/ets/2011/4433/00/4433a219-abs.html},
  doi = {http://dx.doi.org/10.1109/ETS.2011.51},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2011/ETS_PoliaBHWM2011.pdf}
}
165. Power-Aware Test Generation with Guaranteed Launch Safety for At-Speed Scan Testing
Wen, X., Enokimoto, K., Miyase, K., Yamato, Y., Kochte, M.A., Kajihara, S., Girard, P. and Tehranipoor, M.
Proceedings of the 29th IEEE VLSI Test Symposium (VTS'11), Dana Point, California, USA, 1-5 May 2011, pp. 166-171
2011
DOI PDF 
Keywords: test generation; test power; at-speed scan testing; power supply noise; launch safety
Abstract: At-speed scan testing may suffer from severe yield loss due to the launch safety problem, where test responses are invalidated by excessive launch switching activity (LSA) caused by test stimulus launching in the at-speed test cycle. However, previous low-power test generation techniques can only reduce LSA to some extent but cannot guarantee launch safety. This paper proposes a novel & practical power-aware test generation flow, featuring guaranteed launch safety. The basic idea is to enhance ATPG with a unique two-phase (rescue & mask) scheme by targeting at the real cause of the launch safety problem, i.e., the excessive LSA in the neighboring areas (namely impact areas) around long paths sensitized by a test vector. The rescue phase is to reduce excessive LSA in impact areas in a focused manner, and the mask phase is to exclude from use in fault detection the uncertain test response at the endpoint of any long sensitized path that still has excessive LSA in its impact area even after the rescue phase is executed. This scheme is the first of its kind for achieving guaranteed launch safety with minimal impact on test quality and test costs, which is the ultimate goal of power-aware at-speed scan test generation.
BibTeX:
@inproceedings{WenEMYKKGT2011,
  author = {Wen, Xiaoqing and Enokimoto, Kazunari and Miyase, Kohei and Yamato, Yuta and Kochte, Michael A. and Kajihara, Seiji and Girard, Patrick and Tehranipoor, Mohammad},
  title = {{Power-Aware Test Generation with Guaranteed Launch Safety for At-Speed Scan Testing}},
  booktitle = {Proceedings of the 29th IEEE VLSI Test Symposium (VTS'11)},
  publisher = {IEEE Computer Society},
  year = {2011},
  pages = {166--171},
  keywords = {test generation; test power; at-speed scan testing; power supply noise; launch safety},
  abstract = {At-speed scan testing may suffer from severe yield loss due to the launch safety problem, where test responses are invalidated by excessive launch switching activity (LSA) caused by test stimulus launching in the at-speed test cycle. However, previous low-power test generation techniques can only reduce LSA to some extent but cannot guarantee launch safety. This paper proposes a novel & practical power-aware test generation flow, featuring guaranteed launch safety. The basic idea is to enhance ATPG with a unique two-phase (rescue & mask) scheme by targeting at the real cause of the launch safety problem, i.e., the excessive LSA in the neighboring areas (namely impact areas) around long paths sensitized by a test vector. The rescue phase is to reduce excessive LSA in impact areas in a focused manner, and the mask phase is to exclude from use in fault detection the uncertain test response at the endpoint of any long sensitized path that still has excessive LSA in its impact area even after the rescue phase is executed. This scheme is the first of its kind for achieving guaranteed launch safety with minimal impact on test quality and test costs, which is the ultimate goal of power-aware at-speed scan test generation.},
  doi = {http://dx.doi.org/10.1109/VTS.2011.5783778},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2011/VTS_WenEMYKKGT2011.pdf}
}
164. SAT-Based Fault Coverage Evaluation in the Presence of Unknown Values
Kochte, M.A. and Wunderlich, H.-J.
Proceedings of the ACM/IEEE Design Automation and Test in Europe (DATE'11), Grenoble, France, 14-18 March 2011, pp. 1303-1308
2011
DOI URL PDF 
Keywords: Unknown values; fault coverage; precise fault simulation
Abstract: Fault simulation of digital circuits must correctly compute fault coverage to assess test and product quality. In case of unknown values (X-values), fault simulation is pessimistic and underestimates actual fault coverage, resulting in increased test time and data volume, as well as higher overhead for design- for-test. This work proposes a novel algorithm to determine fault coverage with significantly increased accuracy, offering increased fault coverage at no cost, or the reduction of test costs for the targeted coverage. The algorithm is compared to related work and evaluated on benchmark and industrial circuits.
BibTeX:
@inproceedings{KochtW2011,
  author = {Kochte, Michael A. and Wunderlich, Hans-Joachim},
  title = {{SAT-Based Fault Coverage Evaluation in the Presence of Unknown Values}},
  booktitle = {Proceedings of the ACM/IEEE Design Automation and Test in Europe (DATE'11)},
  publisher = {IEEE Computer Society},
  year = {2011},
  pages = {1303--1308},
  keywords = {Unknown values; fault coverage; precise fault simulation},
  abstract = {Fault simulation of digital circuits must correctly compute fault coverage to assess test and product quality. In case of unknown values (X-values), fault simulation is pessimistic and underestimates actual fault coverage, resulting in increased test time and data volume, as well as higher overhead for design- for-test. This work proposes a novel algorithm to determine fault coverage with significantly increased accuracy, offering increased fault coverage at no cost, or the reduction of test costs for the targeted coverage. The algorithm is compared to related work and evaluated on benchmark and industrial circuits.},
  url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=5763209},
  doi = {http://dx.doi.org/10.1109/DATE.2011.5763209},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2011/DATE_KochtW2011.pdf}
}
163. On Determining the Real Output Xs by SAT-Based Reasoning
Elm, M., Kochte, M.A. and Wunderlich, H.-J.
Proceedings of the IEEE 19th Asian Test Symposium (ATS'10), Shanghai, China, 1-4 December 2010, pp. 39-44
2010
DOI URL PDF 
Keywords: X-Masking
Abstract: Embedded testing, built-in self-test and methods for test compression rely on efficient test response compaction. Often, a circuit under test contains sources of unknown values (X), uninitialized memories for instance. These X values propagate through the circuit and may spoil the response signatures. The standard way to overcome this problem is X-masking.
Outputs which carry an X value are usually determined by logic simulation. In this paper, we show that the amount of Xs is significantly overestimated, and in consequence outputs are overmasked, too. An efficient way for the exact computation of output Xs is presented for the first time. The resulting X-masking promises significant gains with respect to test time, test volume and fault coverage.
BibTeX:
@inproceedings{ElmKW2010,
  author = {Elm, Melanie and Kochte, Michael A. and Wunderlich, Hans-Joachim},
  title = {{On Determining the Real Output Xs by SAT-Based Reasoning}},
  booktitle = {Proceedings of the IEEE 19th Asian Test Symposium (ATS'10)},
  publisher = {IEEE Computer Society},
  year = {2010},
  pages = {39--44},
  keywords = {X-Masking},
  abstract = {Embedded testing, built-in self-test and methods for test compression rely on efficient test response compaction. Often, a circuit under test contains sources of unknown values (X), uninitialized memories for instance. These X values propagate through the circuit and may spoil the response signatures. The standard way to overcome this problem is X-masking.
Outputs which carry an X value are usually determined by logic simulation. In this paper, we show that the amount of Xs is significantly overestimated, and in consequence outputs are overmasked, too. An efficient way for the exact computation of output Xs is presented for the first time. The resulting X-masking promises significant gains with respect to test time, test volume and fault coverage.},
  url = {http://www.computer.org/csdl/proceedings/ats/2010/4248/00/4248a039-abs.html},
  doi = {http://dx.doi.org/10.1109/ATS.2010.16},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2010/ATS_ElmKW2010.pdf}
}
162. Variation-Aware Fault Modeling
Hopsch, F., Becker, B., Hellebrand, S., Polian, I., Straube, B., Vermeiren, W. and Wunderlich, H.-J.
Proceedings of the IEEE 19th Asian Test Symposium (ATS'10), Shanghai, China, 1-4 December 2010, pp. 87-93
2010
DOI URL PDF 
Keywords: Defect-oriented testing; parameter variations; delay; analogue fault simulation; histograms
Abstract: To achieve a high product quality for nano-scale systems both realistic defect mechanisms and process variations must be taken into account. While existing approaches for variation-aware digital testing either restrict themselves to special classes of defects or assume given probability distributions to model variabilities, the proposed approach combines defect-oriented testing with statistical library characterization. It uses Monte Carlo simulations at electrical level to extract delay distributions of cells in the presence of defects and for the defect-free case. This allows distinguishing the effects of process variations on the cell delay from defect-induced cell delays under process variations.
To provide a suitable interface for test algorithms at higher levels of abstraction the distributions are represented as histograms and stored in a histogram data base (HDB). Thus, the computationally expensive defect analysis needs to be performed only once as a preprocessing step for library characterization, and statistical test algorithms do not require any low level information beyond the HDB. The generation of the HDB is demonstrated for primitive cells in 45nm technology.
BibTeX:
@inproceedings{HopscBHPSVW2010,
  author = {Hopsch, Fabian and Becker, Bernd and Hellebrand, Sybille and Polian, Ilia and Straube, Bernd and Vermeiren, Wolfgang and Wunderlich, Hans-Joachim},
  title = {{Variation-Aware Fault Modeling}},
  booktitle = {Proceedings of the IEEE 19th Asian Test Symposium (ATS'10)},
  publisher = {IEEE Computer Society},
  year = {2010},
  pages = {87--93},
  keywords = {Defect-oriented testing; parameter variations; delay; analogue fault simulation; histograms},
  abstract = {To achieve a high product quality for nano-scale systems both realistic defect mechanisms and process variations must be taken into account. While existing approaches for variation-aware digital testing either restrict themselves to special classes of defects or assume given probability distributions to model variabilities, the proposed approach combines defect-oriented testing with statistical library characterization. It uses Monte Carlo simulations at electrical level to extract delay distributions of cells in the presence of defects and for the defect-free case. This allows distinguishing the effects of process variations on the cell delay from defect-induced cell delays under process variations.
To provide a suitable interface for test algorithms at higher levels of abstraction the distributions are represented as histograms and stored in a histogram data base (HDB). Thus, the computationally expensive defect analysis needs to be performed only once as a preprocessing step for library characterization, and statistical test algorithms do not require any low level information beyond the HDB. The generation of the HDB is demonstrated for primitive cells in 45nm technology.},
  url = {http://www.computer.org/csdl/proceedings/ats/2010/4248/00/4248a087-abs.html},
  doi = {http://dx.doi.org/10.1109/ATS.2010.24},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2010/ATS_HopscBHPSVW2010.pdf}
}
161. Efficient Simulation of Structural Faults for the Reliability Evaluation at System-Level
Kochte, M.A., Zoellin, C.G., Baranowski, R., Imhof, M.E., Wunderlich, H.-J., Hatami, N., Di Carlo, S. and Prinetto, P.
Proceedings of the IEEE 19th Asian Test Symposium (ATS'10), Shanghai, China, 1-4 December 2010, pp. 3-8
2010
DOI URL PDF 
Keywords: Fault simulation; multi-level; transaction-level modeling
Abstract: In recent technology nodes, reliability is considered a part of the standard design flow at all levels of embedded system design. While techniques that use only low-level models at gate- and register transfer-level offer high accuracy, they are too inefficient to consider the overall application of the embedded system. Multi-level models with high abstraction are essential to efficiently evaluate the impact of physical defects on the system. This paper provides a methodology that leverages state-of-the-art techniques for efficient fault simulation of structural faults together with transaction-level modeling. This way it is possible to accurately evaluate the impact of the faults on the entire hardware/software system. A case study of a system consisting of hardware and software for image compression and data encryption is presented and the method is compared to a standard gate/RT mixed-level approach.
BibTeX:
@inproceedings{KochtZBIWHDP2010b,
  author = {Kochte, Michael A. and Zoellin, Christian G. and Baranowski, Rafal and Imhof, Michael E. and Wunderlich, Hans-Joachim and Hatami, Nadereh and Di Carlo, Stefano and Prinetto, Paolo},
  title = {{Efficient Simulation of Structural Faults for the Reliability Evaluation at System-Level}},
  booktitle = {Proceedings of the IEEE 19th Asian Test Symposium (ATS'10)},
  publisher = {IEEE Computer Society},
  year = {2010},
  pages = {3--8},
  keywords = {Fault simulation; multi-level; transaction-level modeling},
  abstract = {In recent technology nodes, reliability is considered a part of the standard design flow at all levels of embedded system design. While techniques that use only low-level models at gate- and register transfer-level offer high accuracy, they are too inefficient to consider the overall application of the embedded system. Multi-level models with high abstraction are essential to efficiently evaluate the impact of physical defects on the system. This paper provides a methodology that leverages state-of-the-art techniques for efficient fault simulation of structural faults together with transaction-level modeling. This way it is possible to accurately evaluate the impact of the faults on the entire hardware/software system. A case study of a system consisting of hardware and software for image compression and data encryption is presented and the method is compared to a standard gate/RT mixed-level approach.},
  url = {http://www.computer.org/csdl/proceedings/ats/2010/4248/00/4248a003-abs.html},
  doi = {http://dx.doi.org/10.1109/ATS.2010.10},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2010/ATS_KochtZBIWHDP2010.pdf}
}
160. System Reliability Evaluation Using Concurrent Multi-Level Simulation of Structural Faults
Kochte, M.A., Zoellin, C.G., Baranowski, R., Imhof, M.E., Wunderlich, H.-J., Hatami, N., Di Carlo, S. and Prinetto, P.
IEEE International Test Conference (ITC'10), Austin, Texas, USA, 31 October-5 November 2010
2010
DOI PDF 
Abstract: This paper provides a methodology that leverages state-of-the-art techniques for efficient fault simulation of structural faults together with transaction level modeling. This way it is possible to accurately evaluate the impact of the faults on the entire hardware/software system.
BibTeX:
@inproceedings{KochtZBIWHDP2010,
  author = {Kochte, Michael A. and Zoellin, Christian G. and Baranowski, Rafal and Imhof, Michael E. and Wunderlich, Hans-Joachim and Hatami, Nadereh and Di Carlo, Stefano and Prinetto, Paolo},
  title = {{System Reliability Evaluation Using Concurrent Multi-Level Simulation of Structural Faults}},
  booktitle = {IEEE International Test Conference (ITC'10)},
  publisher = {IEEE Computer Society},
  year = {2010},
  abstract = {This paper provides a methodology that leverages state-of-the-art techniques for efficient fault simulation of structural faults together with transaction level modeling. This way it is possible to accurately evaluate the impact of the faults on the entire hardware/software system.},
  doi = {http://dx.doi.org/10.1109/TEST.2010.5699309},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2010/ITC_KochtZBIWHDP2010.pdf}
}
159. Parity Prediction Synthesis for Nano-Electronic Gate Designs
Tran, D.A., Virazel, A., Bosio, A., Dilillo, L., Girard, P., Pravossoudovitch, S. and Wunderlich, H.-J.
IEEE International Test Conference (ITC'10), Austin, Texas, USA, 31 October-5 November 2010
2010
DOI PDF 
Abstract: In this paper we investigate the possibility of using commercial synthesis tools to build parity predictors for nano-electronic gate designs. They will be used as redundant resources for robustness improvement for future CMOS technology nodes.
BibTeX:
@inproceedings{TranVBDGPW2010,
  author = {Tran, Duc Anh and Virazel, Arnaud and Bosio, Alberto and Dilillo, Luigi and Girard, Patrick and Pravossoudovitch, Serge and Wunderlich, Hans-Joachim},
  title = {{Parity Prediction Synthesis for Nano-Electronic Gate Designs}},
  booktitle = {IEEE International Test Conference (ITC'10)},
  publisher = {IEEE Computer Society},
  year = {2010},
  abstract = {In this paper we investigate the possibility of using commercial synthesis tools to build parity predictors for nano-electronic gate designs. They will be used as redundant resources for robustness improvement for future CMOS technology nodes.},
  doi = {http://dx.doi.org/10.1109/TEST.2010.5699312},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2010/ITC_TranVBDGPW2010.pdf}
}
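Entry 159 uses a parity predictor as a redundant resource: a separately synthesized circuit that computes, directly from the inputs, the parity the outputs should have. The sketch below shows the principle on a 1-bit full adder; the example logic and function names are purely illustrative and do not reproduce the synthesis flow of the paper.

from functools import reduce
from operator import xor

def full_adder(a, b, cin):
    """Functional logic under check: 1-bit full adder returning (sum, carry)."""
    s = a ^ b ^ cin
    cout = (a & b) | (a & cin) | (b & cin)
    return [s, cout]

def parity_predictor(a, b, cin):
    """Redundant logic predicting the XOR of all outputs directly from the inputs."""
    return (a ^ b ^ cin) ^ ((a & b) | (a & cin) | (b & cin))

def parity_check(inputs, outputs):
    """Flag a mismatch between observed output parity and predicted parity."""
    return reduce(xor, outputs) == parity_predictor(*inputs)

# Exhaustive check: the predictor agrees with the fault-free adder and
# detects any single flipped output bit (any odd number of flips in general).
for a in (0, 1):
    for b in (0, 1):
        for cin in (0, 1):
            out = full_adder(a, b, cin)
            assert parity_check((a, b, cin), out)
            assert not parity_check((a, b, cin), [out[0] ^ 1, out[1]])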
158. Efficient Concurrent Self-Test with Partially Specified Patterns
Kochte, M.A., Zoellin, C.G. and Wunderlich, H.-J.
Journal of Electronic Testing: Theory and Applications (JETTA)
Vol. 26(5), October 2010, pp. 581-594
2010
DOI URL  
Keywords: Concurrent self-test; BIST; Test generation; VLSI
Abstract: Structural on-line self-test may be performed to detect permanent faults and avoid their accumulation in the system. This paper improves existing techniques for concurrent BIST that are based on a deterministic test set. Here, the test patterns are specially generated with a small number of specified bits. This results in very low test length and fault detection latency, which allows to frequently test critical faults. As a consequence, the likelihood of fault accumulation is reduced. Experiments with benchmark circuits show that the hardware overhead is significantly lower than the overhead of the state of the art. Moreover, a case-study on a super-scalar RISC processor demonstrates the feasibility of the method.
BibTeX:
@article{KochtZW2010,
  author = {Kochte, Michael A. and Zoellin, Christian G. and Wunderlich, Hans-Joachim},
  title = {{Efficient Concurrent Self-Test with Partially Specified Patterns}},
  journal = {Journal of Electronic Testing: Theory and Applications (JETTA)},
  publisher = {Springer-Verlag},
  year = {2010},
  volume = {26},
  number = {5},
  pages = {581--594},
  keywords = {Concurrent self-test; BIST; Test generation; VLSI},
  abstract = {Structural on-line self-test may be performed to detect permanent faults and avoid their accumulation in the system. This paper improves existing techniques for concurrent BIST that are based on a deterministic test set. Here, the test patterns are specially generated with a small number of specified bits. This results in very low test length and fault detection latency, which allows to frequently test critical faults. As a consequence, the likelihood of fault accumulation is reduced. Experiments with benchmark circuits show that the hardware overhead is significantly lower than the overhead of the state of the art. Moreover, a case-study on a super-scalar RISC processor demonstrates the feasibility of the method.},
  url = {http://dl.acm.org/citation.cfm?id=1897730.1897739},
  doi = {http://dx.doi.org/10.1007/s10836-010-5167-6}
}
157. Effiziente Simulation von strukturellen Fehlern für die Zuverlässigkeitsanalyse auf Systemebene
Kochte, M.A., Zöllin, C.G., Baranowski, R., Imhof, M.E., Wunderlich, H.-J., Hatami, N., Di Carlo, S. and Prinetto, P.
4. GMM/GI/ITG-Fachtagung Zuverlässigkeit und Entwurf (ZuE'10)
Vol. 66, Wildbad Kreuth, Germany, 13-15 September 2010, pp. 25-32
2010
URL PDF 
Keywords: Transaktionsebenen-Modellierung; Ebenenübergreifende Fehlersimulation
Abstract: In aktueller Prozesstechnologie muss die Zuverlässigkeit in allen Entwurfsschritten von eingebetteten Systemen betrachtet werden. Methoden, die nur Modelle auf unteren Abstraktionsebenen, wie Gatter- oder Registertransferebene, verwenden, bieten zwar eine hohe Genauigkeit, sind aber zu ineffizient, um komplexe Hardware/Software-Systeme zu analysieren. Hier werden ebenenübergreifende Verfahren benötigt, die auch hohe Abstraktion unterstützen, um effizient die Auswirkungen von Defekten im System bewerten zu können. Diese Arbeit stellt eine Methode vor, die aktuelle Techniken für die effiziente Simulation von strukturellen Fehlern mit Systemmodellierung auf Transaktionsebene kombiniert. Auf diese Weise ist es möglich, eine präzise Bewertung der Fehlerauswirkung auf das gesamte Hardware/Software-System durchzuführen. Die Ergebnisse einer Fallstudie eines Hardware/Software-Systems zur Datenverschlüsselung und Bildkompression werden diskutiert und die Methode wird mit einem Standard-Fehlerinjektionsverfahren verglichen.
BibTeX:
@inproceedings{KochtZBIWHDP2010a,
  author = {Kochte, Michael A. and Zöllin, Christian G. and Baranowski, Rafal and Imhof, Michael E. and Wunderlich, Hans-Joachim and Hatami, Nadereh and Di Carlo, Stefano and Prinetto, Paolo},
  title = {{Effiziente Simulation von strukturellen Fehlern für die Zuverlässigkeitsanalyse auf Systemebene}},
  booktitle = {4. GMM/GI/ITG-Fachtagung Zuverlässigkeit und Entwurf (ZuE'10)},
  publisher = {VDE VERLAG GMBH},
  year = {2010},
  volume = {66},
  pages = {25--32},
  keywords = {Transaktionsebenen-Modellierung; Ebenenübergreifende Fehlersimulation},
  abstract = {In aktueller Prozesstechnologie muss die Zuverlässigkeit in allen Entwurfsschritten von eingebetteten Systemen betrachtet werden. Methoden, die nur Modelle auf unteren Abstraktionsebenen, wie Gatter- oder Registertransferebene, verwenden, bieten zwar eine hohe Genauigkeit, sind aber zu ineffizient, um komplexe Hardware/Software-Systeme zu analysieren. Hier werden ebenenübergreifende Verfahren benötigt, die auch hohe Abstraktion unterstützen, um effizient die Auswirkungen von Defekten im System bewerten zu können. Diese Arbeit stellt eine Methode vor, die aktuelle Techniken für die effiziente Simulation von strukturellen Fehlern mit Systemmodellierung auf Transaktionsebene kombiniert. Auf diese Weise ist es möglich, eine präzise Bewertung der Fehlerauswirkung auf das gesamte Hardware/Software-System durchzuführen. Die Ergebnisse einer Fallstudie eines Hardware/Software-Systems zur Datenverschlüsselung und Bildkompression werden diskutiert und die Methode wird mit einem Standard-Fehlerinjektionsverfahren verglichen.},
  url = {http://www.vde-verlag.de/proceedings-de/453299003.html},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2010/ZuE_KochtZBIWHCP2010.pdf}
}
156. Algorithmen-basierte Fehlertoleranz für Many-Core-Architekturen;
Algorithm-based Fault-Tolerance on Many-Core Architectures

Braun, C. and Wunderlich, H.-J.
it - Information Technology
Vol. 52(4), August 2010, pp. 209-215
2010
DOI  
Keywords: Zuverlässigkeit; Fehlertoleranz; parallele Architekturen; parallele Programmierung
Abstract: Moderne Many-Core-Architekturen bieten ein sehr hohes Potenzial an Rechenleistung. Dies macht sie besonders für Anwendungen aus dem Bereich des wissenschaftlichen Hochleistungsrechnens und der Simulationstechnik attraktiv. Die Architekturen folgen dabei einem Ausführungsparadigma, das sich am besten durch den Begriff „Many-Threading“ beschreiben lässt. Wie alle nanoelektronischen Halbleiterschaltungen leiden auch Many-Core-Prozessoren potentiell unter störenden Einflüssen von transienten Fehlern (soft errors) und diversen Arten von Variationen. Diese Faktoren können die Zuverlässigkeit von Systemen negativ beeinflussen und erfordern Fehlertoleranz auf allen Ebenen, von der Hardware bis zur Software. Auf der Softwareseite stellt die Algorithmen-basierte Fehlertoleranz (ABFT) eine ausgereifte Technik zur Verbesserung der Zuverlässigkeit dar. Der Aufwand für die Anpassung dieser Technik an moderne Many-Threading-Architekturen darf jedoch keinesfalls unterschätzt werden. In diesem Beitrag wird eine effiziente und fehlertolerante Abbildung der Matrixmultiplikation auf eine moderne Many-Core-Architektur präsentiert. Die Fehlertoleranz ist dabei integraler Bestandteil der Abbildung und wird durch ein ABFT-Schema realisiert, das die Leistung nur unwesentlich beeinträchtigt.

Modern many-core architectures provide a high computational potential, which makes them particularly interesting for applications from the fields of scientific high-performance computing and simulation technology. The execution paradigm of these architectures is best described as “Many-Threading”. Like all nano-scaled semiconductor devices, many-core processors are prone to transient errors (soft errors) and different kinds of variations that can have severe impact on the reliability of such systems. Therefore, fault-tolerance has to be incorporated at all levels, from the hardware up to the software. On the software side, Algorithm-based Fault Tolerance (ABFT) is a mature technique to improve the reliability. However, significant effort is required to adapt this technique to modern many-threading architectures. In this article, an efficient and fault-tolerant mapping of the matrix multiplication to a modern many-core architecture is presented. Fault-tolerance is thereby an integral part of the mapping and implemented through an ABFT scheme with marginal impact on the overall performance.

BibTeX:
@article{BraunW2010a,
  author = {Braun, Claus and Wunderlich, Hans-Joachim},
  title = {{Algorithmen-basierte Fehlertoleranz für Many-Core-Architekturen;
Algorithm-based Fault-Tolerance on Many-Core Architectures}},
  journal = {it - Information Technology},
  publisher = {Oldenbourg Wissenschaftsverlag},
  year = {2010},
  volume = {52},
  number = {4},
  pages = {209--215},
  keywords = {Zuverlässigkeit; Fehlertoleranz; parallele Architekturen; parallele Programmierung},
  abstract = {Moderne Many-Core-Architekturen bieten ein sehr hohes Potenzial an Rechenleistung. Dies macht sie besonders für Anwendungen aus dem Bereich des wissenschaftlichen Hochleistungsrechnens und der Simulationstechnik attraktiv. Die Architekturen folgen dabei einem Ausführungsparadigma, das sich am besten durch den Begriff „Many-Threading“ beschreiben lässt. Wie alle nanoelektronischen Halbleiterschaltungen leiden auch Many-Core-Prozessoren potentiell unter störenden Einflüssen von transienten Fehlern (soft errors) und diversen Arten von Variationen. Diese Faktoren können die Zuverlässigkeit von Systemen negativ beeinflussen und erfordern Fehlertoleranz auf allen Ebenen, von der Hardware bis zur Software. Auf der Softwareseite stellt die Algorithmen-basierte Fehlertoleranz (ABFT) eine ausgereifte Technik zur Verbesserung der Zuverlässigkeit dar. Der Aufwand für die Anpassung dieser Technik an moderne Many-Threading-Architekturen darf jedoch keinesfalls unterschätzt werden. In diesem Beitrag wird eine effiziente und fehlertolerante Abbildung der Matrixmultiplikation auf eine moderne Many-Core-Architektur präsentiert. Die Fehlertoleranz ist dabei integraler Bestandteil der Abbildung und wird durch ein ABFT-Schema realisiert, das die Leistung nur unwesentlich beeinträchtigt.

Modern many-core architectures provide a high computational potential, which makes them particularly interesting for applications from the fields of scientific high-performance computing and simulation technology. The execution paradigm of these architectures is best described as “Many-Threading”. Like all nano-scaled semiconductor devices, many-core processors are prone to transient errors (soft errors) and different kinds of variations that can have severe impact on the reliability of such systems. Therefore, fault-tolerance has to be incorporated at all levels, from the hardware up to the software. On the software side, Algorithm-based Fault Tolerance (ABFT) is a mature technique to improve the reliability. However, significant effort is required to adapt this technique to modern many-threading architectures. In this article, an efficient and fault-tolerant mapping of the matrix multiplication to a modern many-core architecture is presented. Fault-tolerance is thereby an integral part of the mapping and implemented through an ABFT scheme with marginal impact on the overall performance.},
  doi = {http://dx.doi.org/10.1524/itit.2010.0593}
}
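Entries 156 and 153 build on checksum-based algorithm-based fault tolerance for matrix multiplication. The sketch below shows the classical row/column-checksum idea (Huang/Abraham style) in NumPy as a plain illustration; it is not the authors' many-core mapping, and the error-detection threshold is an arbitrary illustrative value.

import numpy as np

def abft_matmul(A, B):
    """Compute C = A @ B with a column-checksum row and a row-checksum column appended."""
    Ac = np.vstack([A, A.sum(axis=0)])                  # column-checksum matrix
    Br = np.hstack([B, B.sum(axis=1, keepdims=True)])   # row-checksum matrix
    return Ac @ Br                                       # checksum-extended product

def abft_check_and_correct(Cf, tol=1e-6):
    """Verify the checksums of the extended product; locate and correct a single error."""
    C = Cf[:-1, :-1].copy()
    row_err = Cf[:-1, -1] - C.sum(axis=1)   # per-row mismatch against the row checksums
    col_err = Cf[-1, :-1] - C.sum(axis=0)   # per-column mismatch against the column checksums
    bad_rows = np.flatnonzero(np.abs(row_err) > tol)
    bad_cols = np.flatnonzero(np.abs(col_err) > tol)
    if len(bad_rows) == 1 and len(bad_cols) == 1:        # single erroneous element located
        C[bad_rows[0], bad_cols[0]] += row_err[bad_rows[0]]
    return C, bool(len(bad_rows) or len(bad_cols))

rng = np.random.default_rng(1)
A, B = rng.random((4, 3)), rng.random((3, 5))
Cf = abft_matmul(A, B)
Cf[2, 1] += 0.5                          # inject a transient error into the product
C, detected = abft_check_and_correct(Cf)
assert detected and np.allclose(C, A @ B)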

155. Massive Statistical Process Variations: A Grand Challenge for Testing Nanoelectronic Circuits
Becker, B., Hellebrand, S., Polian, I., Straube, B., Vermeiren, W. and Wunderlich, H.-J.
Proceedings of the 4th Workshop on Dependable and Secure Nanocomputing (DSN-W'10), Chicago, Illinois, USA, 28 June-1 July 2010, pp. 95-100
2010
DOI PDF 
Abstract: Increasing parameter variations, high defect densities and a growing susceptibility to external noise in nanoscale technologies have led to a paradigm shift in design. Classical design strategies based on worst-case or average assumptions have been replaced by statistical design, and new robust and variation tolerant architectures have been developed. At the same time testing has become extremely challenging, as parameter variations may lead to an unacceptable behavior or change the impact of defects. Furthermore, for robust designs a precise quality assessment is required particularly showing the remaining robustness in the presence of manufacturing defects. The paper pinpoints the key challenges for testing nanoelectronic circuits in more detail, covering the range of variation-aware fault modeling via methods for statistical testing and their algorithmic foundations to robustness analysis and quality binning.
BibTeX:
@inproceedings{BeckeHPSVW2010,
  author = {Becker, Bernd and Hellebrand, Sybille and Polian, Ilia and Straube, Bernd and Vermeiren, Wolfgang and Wunderlich, Hans-Joachim},
  title = {{Massive Statistical Process Variations: A Grand Challenge for Testing Nanoelectronic Circuits}},
  booktitle = {Proceedings of the 4th Workshop on Dependable and Secure Nanocomputing (DSN-W'10)},
  publisher = {IEEE Computer Society},
  year = {2010},
  pages = {95--100},
  abstract = {Increasing parameter variations, high defect densities and a growing susceptibility to external noise in nanoscale technologies have led to a paradigm shift in design. Classical design strategies based on worst-case or average assumptions have been replaced by statistical design, and new robust and variation tolerant architectures have been developed. At the same time testing has become extremely challenging, as parameter variations may lead to an unacceptable behavior or change the impact of defects. Furthermore, for robust designs a precise quality assessment is required particularly showing the remaining robustness in the presence of manufacturing defects. The paper pinpoints the key challenges for testing nanoelectronic circuits in more detail, covering the range of variation-aware fault modeling via methods for statistical testing and their algorithmic foundations to robustness analysis and quality binning.},
  doi = {http://dx.doi.org/10.1109/DSNW.2010.5542612},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2010/WDSN_BeckerHPSVW2010.pdf}
}
154. Efficient Fault Simulation on Many-Core Processors
Kochte, M.A., Schaal, M., Wunderlich, H.-J. and Zoellin, C.G.
Proceedings of the 47th ACM/IEEE Design Automation Conference (DAC'10), Anaheim, California, USA, 13-18 June 2010, pp. 380-385
2010
DOI URL PDF 
Keywords: Parallel Fault Simulation, Many-Core Processors, PPSFP
Abstract: Fault simulation is essential in test generation, design for test and reliability assessment of integrated circuits. Reliability analysis and the simulation of self-test structures are particularly computationally expensive as a large number of patterns has to be evaluated.
In this work, we propose to map a fault simulation algorithm based on the parallel-pattern single-fault propagation (PPSFP) paradigm to many-core architectures and describe the involved algorithmic optimizations. Many-core architectures are characterized by a high number of simple execution units with small local memory. The proposed fault simulation algorithm exploits the parallelism of these architectures by use of parallel data structures. The algorithm is implemented for the NVIDIA GT200 Graphics Processing Unit (GPU) architecture and achieves a speed-up of up to 17x compared to an existing GPU fault-simulation algorithm and up to 16x compared to state-of-the-art algorithms on conventional processor architectures.
BibTeX:
@inproceedings{KochtSWZ2010,
  author = {Kochte, Michael A. and Schaal, Marcel and Wunderlich, Hans-Joachim and Zoellin, Christian G.},
  title = {{Efficient Fault Simulation on Many-Core Processors}},
  booktitle = {Proceedings of the 47th ACM/IEEE Design Automation Conference (DAC'10)},
  publisher = {ACM},
  year = {2010},
  pages = {380--385},
  keywords = {Parallel Fault Simulation, Many-Core Processors, PPSFP},
  abstract = {Fault simulation is essential in test generation, design for test and reliability assessment of integrated circuits. Reliability analysis and the simulation of self-test structures are particularly computationally expensive as a large number of patterns has to be evaluated.
In this work, we propose to map a fault simulation algorithm based on the parallel-pattern single-fault propagation (PPSFP) paradigm to many-core architectures and describe the involved algorithmic optimizations. Many-core architectures are characterized by a high number of simple execution units with small local memory. The proposed fault simulation algorithm exploits the parallelism of these architectures by use of parallel data structures. The algorithm is implemented for the NVIDIA GT200 Graphics Processing Unit (GPU) architecture and achieves a speed-up of up to 17x compared to an existing GPU fault-simulation algorithm and up to 16x compared to state-of-the-art algorithms on conventional processor architectures.},
  url = {http://ieeexplore.ieee.org/xpl/freeabs_all.jsp?arnumber=5523154},
  doi = {http://dx.doi.org/10.1145/1837274.1837369},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2010/DAC_KochteSWZ2010.pdf}
}
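Entry 154 rests on parallel-pattern single-fault propagation (PPSFP): one fault is simulated at a time while a whole word of test patterns is evaluated in parallel through bitwise operations. The following plain-Python sketch only illustrates that principle on a made-up three-gate netlist; the data structures, gate set and fault list are hypothetical and unrelated to the paper's GPU implementation.

import random

NETLIST = [                        # (output net, operation, input nets), topologically ordered
    ("n1", "AND", ("a", "b")),
    ("n2", "OR", ("n1", "c")),
    ("out", "AND", ("n2", "a")),
]
INPUTS, OUTPUTS = ("a", "b", "c"), ("out",)
MASK = (1 << 64) - 1               # 64 test patterns packed into one machine word

def simulate(pattern_words, fault=None):
    """Bit-parallel logic simulation; 'fault' is an optional (net, stuck_value) pair."""
    def inject(net, word):
        if fault is not None and fault[0] == net:
            return MASK if fault[1] else 0
        return word
    val = {net: inject(net, word) for net, word in pattern_words.items()}
    for out, op, (x, y) in NETLIST:
        word = (val[x] & val[y]) if op == "AND" else (val[x] | val[y])
        val[out] = inject(out, word) & MASK
    return {o: val[o] for o in OUTPUTS}

def ppsfp(pattern_words, faults):
    """Detected faults: some packed pattern yields a response differing from the good machine."""
    good = simulate(pattern_words)
    return {f for f in faults
            if any(good[o] ^ simulate(pattern_words, f)[o] for o in OUTPUTS)}

random.seed(0)
patterns = {i: random.getrandbits(64) for i in INPUTS}
fault_list = [(net, v) for net in ("a", "b", "c", "n1", "n2", "out") for v in (0, 1)]
print(sorted(ppsfp(patterns, fault_list)))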
153. Algorithm-Based Fault Tolerance for Many-Core Architectures
Braun, C. and Wunderlich, H.-J.
Proceedings of the 15th IEEE European Test Symposium (ETS'10), Praha, Czech Republic, 24-28 May 2010, pp. 253-253
2010
DOI PDF 
Abstract: Modern many-core architectures with hundreds of cores provide a high computational potential. This makes them particularly interesting for scientific high-performance computing and simulation technology. Like all nano scaled semiconductor devices, many-core processors are prone to reliability harming factors like variations and soft errors. One way to improve the reliability of such systems is software-based hardware fault tolerance. Here, the software is able to detect and correct errors introduced by the hardware. In this work, we propose a software-based approach to improve the reliability of matrix operations on many-core processors. These operations are key components in many scientific applications.
BibTeX:
@inproceedings{BraunW2010,
  author = {Braun, Claus and Wunderlich, Hans-Joachim},
  title = {{Algorithm-Based Fault Tolerance for Many-Core Architectures}},
  booktitle = {Proceedings of the 15th IEEE European Test Symposium (ETS'10)},
  publisher = {IEEE Computer Society},
  year = {2010},
  pages = {253--253},
  abstract = {Modern many-core architectures with hundreds of cores provide a high computational potential. This makes them particularly interesting for scientific high-performance computing and simulation technology. Like all nano scaled semiconductor devices, many-core processors are prone to reliability harming factors like variations and soft errors. One way to improve the reliability of such systems is software-based hardware fault tolerance. Here, the software is able to detect and correct errors introduced by the hardware. In this work, we propose a software-based approach to improve the reliability of matrix operations on many-core processors. These operations are key components in many scientific applications.},
  doi = {http://dx.doi.org/10.1109/ETSYM.2010.5512738},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2010/ETS_BraunW2010.pdf}
}
152. Low-Power Test Planning for Arbitrary At-Speed Delay-Test Clock Schemes
Zoellin, C.G. and Wunderlich, H.-J.
Proceedings of the 28th VLSI Test Symposium (VTS'10), Santa Cruz, California, USA, 19-22 April 2010, pp. 93-98
2010
DOI PDF 
Keywords: Delay test; power-aware testing; built-in self-test
Abstract: High delay-fault coverage requires rather sophisticated clocking schemes in test mode, which usually combine launch-on-shift and launch-on-capture strategies. These complex clocking schemes make low power test planning more difficult as initialization, justification and propagation require multiple clock cycles. This paper describes a unified method to map the sequential test planning problem to a combinational circuit representation. The combinational representation is subject to known algorithms for efficient low power built-in self-test planning. Experimental results for a set of industrial circuits show that even rather complex test clocking schemes lead to an efficient low power test plan.
BibTeX:
@inproceedings{ZoelliW2010,
  author = {Zoellin, Christian G. and Wunderlich, Hans-Joachim},
  title = {{Low-Power Test Planning for Arbitrary At-Speed Delay-Test Clock Schemes}},
  booktitle = {Proceedings of the 28th VLSI Test Symposium (VTS'10)},
  publisher = {IEEE Computer Society},
  year = {2010},
  pages = {93--98},
  keywords = {Delay test; power-aware testing; built-in self-test},
  abstract = {High delay-fault coverage requires rather sophisticated clocking schemes in test mode, which usually combine launch-on-shift and launch-on-capture strategies. These complex clocking schemes make low power test planning more difficult as initialization, justification and propagation require multiple clock cycles. This paper describes a unified method to map the sequential test planning problem to a combinational circuit representation. The combinational representation is subject to known algorithms for efficient low power built-in self-test planning. Experimental results for a set of industrial circuits show that even rather complex test clocking schemes lead to an efficient low power test plan.},
  doi = {http://dx.doi.org/10.1109/VTS.2010.5469607},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2010/VTS_ZoellinW2010.pdf}
}
151. BISD: Scan-Based Built-In Self-Diagnosis
Elm, M. and Wunderlich, H.-J.
Proceedings of the ACM/IEEE Design Automation and Test in Europe (DATE'10), Dresden, Germany, 8-12 March 2010, pp. 1243-1248
2010
URL PDF 
Keywords: Logic BIST, Diagnosis
Abstract: Built-In Self-Test (BIST) is less often applied to random logic than to embedded memories due to the following reasons: Firstly, for a satisfactory fault coverage it may be necessary to apply additional deterministic patterns, which cause additional hardware costs. Secondly, the BIST-signature reveals only poor diagnostic information. Recently, the first issue has been addressed successfully. The paper at hand proposes a viable, effective and cost-efficient solution for the second problem.

The paper presents a new method for Built-In Self-Diagnosis (BISD). The core of the method is an extreme response compaction architecture, which for the first time enables an autonomous on-chip evaluation of test responses with negligible hardware overhead. The key advantage of this architecture is that all data, which is relevant for a subsequent diagnosis, is gathered during just one test session.

The BISD method comprises a hardware scheme, a test pattern generation approach and a diagnosis algorithm. Experiments conducted with industrial designs substantiate that the additional hardware overhead introduced by the BISD method is on average about 15% of the BIST area, and the same diagnostic resolution can be obtained as for external testing.

BibTeX:
@inproceedings{ElmW2010,
  author = {Elm, Melanie and Wunderlich, Hans-Joachim},
  title = {{BISD: Scan-Based Built-In Self-Diagnosis}},
  booktitle = {Proceedings of the ACM/IEEE Design Automation and Test in Europe (DATE'10)},
  publisher = {IEEE Computer Society},
  year = {2010},
  pages = {1243--1248},
  keywords = {Logic BIST, Diagnosis},
  abstract = {Built-In Self-Test (BIST) is less often applied to random logic than to embedded memories due to the following reasons: Firstly, for a satisfactory fault coverage it may be necessary to apply additional deterministic patterns, which cause additional hardware costs. Secondly, the BIST-signature reveals only poor diagnostic information. Recently, the first issue has been addressed successfully. The paper at hand proposes a viable, effective and cost-efficient solution for the second problem.

The paper presents a new method for Built-In Self-Diagnosis (BISD). The core of the method is an extreme response compaction architecture, which for the first time enables an autonomous on-chip evaluation of test responses with negligible hardware overhead. The key advantage of this architecture is that all data, which is relevant for a subsequent diagnosis, is gathered during just one test session.

The BISD method comprises a hardware scheme, a test pattern generation approach and a diagnosis algorithm. Experiments conducted with industrial designs substantiate that the additional hardware overhead introduced by the BISD method is on average about 15% of the BIST area, and the same diagnostic resolution can be obtained as for external testing.},
  url = {http://ieeexplore.ieee.org/xpl/freeabs_all.jsp?arnumber=5456997},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2010/DATE_ElmW2010.pdf}
}

150. XP-SISR: Eingebaute Selbstdiagnose für Schaltungen mit Prüfpfad
Elm, M. and Wunderlich, H.-J.
3. GMM/GI/ITG-Fachtagung Zuverlässigkeit und Entwurf (ZuE'09)
Vol. 61, Stuttgart, Germany, 21-23 September 2009, pp. 21-28
Best paper award
2009
URL PDF 
Keywords: Logic BIST; Diagnosis
Abstract: Die Vorteile des Eingebauten Selbsttests (BIST --- Built-In Self-Test) sind bekannt, für eingebettete Speicher ist BIST sogar die bevorzugte Teststrategie. Für freie Logik wird BIST deutlich seltener eingesetzt. Grund hierfür ist zum einen, dass deterministische Testmuster für eine hohe Fehlerabdeckung benötigt werden und diese im Selbsttest hohe Kosten verursachen. Zum anderen lassen sich aus den Testantworten, die zu einer einzigen Signatur kompaktiert werden, nur wenige diagnostische Informationen ziehen. In den vergangenen Jahren wurden kontinuierlich Fortschritte zur Lösung des ersten Problems erzielt. Dieser Beitrag befasst sich mit der Lösung des zweiten Problems.
Eine neue Methode für die Eingebaute Selbstdiagnose (BISD --- Built-In Self-Diagnosis) wird vorgeschlagen. Kern der Methode ist eine kombinierte, extreme Raum- und Zeitkompaktierung, die es erstmals ermöglicht, erwartete Antworten und fehlerhafte Antworten mit vernachlässigbarem Aufwand auf dem zu testenden Chip zu speichern. Somit können in einer einzigen Selbsttestsitzung pro Chip alle zur Diagnose notwendigen Daten gesammelt werden.
Das BISD Schema umfasst neben der Kompaktierungshardware einen Diagnosealgorithmus und ein Verfahren zur Testmustererzeugung, die Aliasingeffekte und die durch die starke Kompaktierung verringerte diagnostische Auflösung kompensieren können. Experimente mit aktuellen, industriellen Schaltungen zeigen, dass die diagnostische Auflösung im Vergleich zum externen Test erhalten bleibt und der zusätzliche Hardware-Aufwand zu vernachlässigen ist.

The advantages of Built-In Self-Test (BIST) are well known, and for embedded memories BIST is already the preferred test method. However, for random logic BIST is less often employed. This is mainly due to the following two reasons: On the one hand, deterministic patterns might be necessary to achieve reasonable fault coverage, yet they are expensive in built-in tests. On the other hand, the diagnostic information provided by BIST-signatures is rather poor. During the last years the first issue has been tackled successfully. This paper deals with the second issue.
A new method for Built-In Self-Diagnosis (BISD) is presented. The method's backbone is a combination of extreme space and time compaction, which for the first time allows to store the expected test responses and the failing test responses with negligible overhead on chip. Consequently, all data relevant to diagnosis can be collected during a single self-test session.
The BISD method additionally comprises a diagnosis algorithm and a test pattern generation scheme, which overcome aliasing and the reduced diagnostic resolution introduced by the extreme compaction. Experiments with recent industrial designs demonstrate that diagnostic resolution is maintained compared to external testing and the additional hardware needed to implement the BISD scheme is negligibly small.

BibTeX:
@inproceedings{ElmW2009,
  author = {Elm, Melanie and Wunderlich, Hans-Joachim},
  title = {{XP-SISR: Eingebaute Selbstdiagnose für Schaltungen mit Prüfpfad}},
  booktitle = {3. GMM/GI/ITG-Fachtagung Zuverlässigkeit und Entwurf (ZuE'09)},
  publisher = {VDE VERLAG GMBH},
  year = {2009},
  volume = {61},
  pages = {21--28},
  keywords = {Logic BIST; Diagnosis},
  abstract = {Die Vorteile des Eingebauten Selbsttests (BIST --- Built-In Self-Test) sind bekannt, für eingebettete Speicher ist BIST sogar die bevorzugte Teststrategie. Für freie Logik wird BIST deutlich seltener eingesetzt. Grund hierfür ist zum einen, dass deterministische Testmuster für eine hohe Fehlerabdeckung benötigt werden und diese im Selbsttest hohe Kosten verursachen. Zum anderen lassen sich aus den Testantworten, die zu einer einzigen Signatur kompaktiert werden, nur wenige diagnostische Informationen ziehen. In den vergangenen Jahren wurden kontinuierlich Fortschritte zur Lösung des ersten Problems erzielt. Dieser Beitrag befasst sich mit der Lösung des zweiten Problems.
Eine neue Methode für die Eingebaute Selbstdiagnose (BISD --- Built-In Self-Diagnosis) wird vorgeschlagen. Kern der Methode ist eine kombinierte, extreme Raum- und Zeitkompaktierung, die es erstmals ermöglicht, erwartete Antworten und fehlerhafte Antworten mit vernachlässigbarem Aufwand auf dem zu testenden Chip zu speichern. Somit können in einer einzigen Selbsttestsitzung pro Chip alle zur Diagnose notwendigen Daten gesammelt werden.
Das BISD Schema umfasst neben der Kompaktierungshardware einen Diagnosealgorithmus und ein Verfahren zur Testmustererzeugung, die Aliasingeffekte und die durch die starke Kompaktierung verringerte diagnostische Auflösung kompensieren können. Experimente mit aktuellen, industriellen Schaltungen zeigen, dass die diagnostische Auflösung im Vergleich zum externen Test erhalten bleibt und der zusätzliche Hardware-Aufwand zu vernachlässigen ist.

The advantages of Built-In Self-Test (BIST) are well known, and for embedded memories BIST is already the preferred test method. However, for random logic BIST is less often employed. This is mainly due to the following two reasons: On the one hand, deterministic patterns might be necessary to achieve reasonable fault coverage, yet they are expensive in built-in tests. On the other hand, the diagnostic information provided by BIST-signatures is rather poor. During the last years the first issue has been tackled successfully. This paper deals with the second issue.
A new method for Built-In Self-Diagnosis (BISD) is presented. The method's backbone is a combination of extreme space and time compaction, which for the first time allows to store the expected test responses and the failing test responses with negligible overhead on chip. Consequently, all data relevant to diagnosis can be collected during a single self-test session.
The BISD method additionally comprises a diagnosis algorithm and a test pattern generation scheme, which overcome aliasing and the reduced diagnostic resolution introduced by the extreme compaction. Experiments with recent industrial designs demonstrate that diagnostic resolution is maintained compared to external testing and the additional hardware needed to implement the BISD scheme is negligibly small.},
  url = {http://www.vde-verlag.de/proceedings-de/453178004.html},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2009/ZuE_ElmW2009.pdf}
}

149. Adaptive Debug and Diagnosis Without Fault Dictionaries
Holst, S. and Wunderlich, H.-J.
Journal of Electronic Testing: Theory and Applications (JETTA)
Vol. 25(4-5), August 2009, pp. 259-268
2009
DOI URL PDF 
Keywords: Diagnosis; Debug; Test; VLSI
Abstract: Diagnosis is essential in modern chip production to increase yield, and debug constitutes a major part in the pre-silicon development process. For recent process technologies, defect mechanisms are increasingly complex, and continuous efforts are made to model these defects by using sophisticated fault models. Traditional static approaches for debug and diagnosis with a simplified fault model are more and more limited. In this paper, a method is presented, which identifies possible faulty regions in a combinational circuit, based on its input/output behavior and independent of a fault model. The new adaptive, statistical approach is named POINTER for 'Partially Overlapping Impact couNTER' and combines a flexible and powerful effect-cause pattern analysis algorithm with high-resolution ATPG. We show the effectiveness of the approach through experiments with benchmark and industrial circuits. In addition, even without additional patterns this analysis method provides good resolution for volume diagnosis, too.
BibTeX:
@article{HolstW2009a,
  author = {Holst, Stefan and Wunderlich, Hans-Joachim},
  title = {{Adaptive Debug and Diagnosis Without Fault Dictionaries}},
  journal = {Journal of Electronic Testing: Theory and Applications (JETTA)},
  publisher = {Springer-Verlag},
  year = {2009},
  volume = {25},
  number = {4-5},
  pages = {259--268},
  keywords = {Diagnosis; Debug; Test; VLSI},
  abstract = {Diagnosis is essential in modern chip production to increase yield, and debug constitutes a major part in the pre-silicon development process. For recent process technologies, defect mechanisms are increasingly complex, and continuous efforts are made to model these defects by using sophisticated fault models. Traditional static approaches for debug and diagnosis with a simplified fault model are more and more limited. In this paper, a method is presented, which identifies possible faulty regions in a combinational circuit, based on its input/output behavior and independent of a fault model. The new adaptive, statistical approach is named POINTER for 'Partially Overlapping Impact couNTER' and combines a flexible and powerful effect-cause pattern analysis algorithm with high-resolution ATPG. We show the effectiveness of the approach through experiments with benchmark and industrial circuits. In addition, even without additional patterns this analysis method provides good resolution for volume diagnosis, too.},
  url = {http://dl.acm.org/citation.cfm?id=1644216.1644221},
  doi = {http://dx.doi.org/10.1007/s10836-009-5109-3},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2009/JETTA_HolstW2009a.pdf}
}
148. Test Encoding for Extreme Response Compaction
Kochte, M.A., Holst, S., Elm, M. and Wunderlich, H.-J.
Proceedings of the 14th IEEE European Test Symposium (ETS'09), Sevilla, Spain, 25-29 May 2009, pp. 155-160
2009
DOI URL PDF 
Keywords: Design for Test; Embedded Diagnosis; Response Compaction; Test Compression
Abstract: Optimizing bandwidth by compression and compaction always has to solve the trade-off between input bandwidth reduction and output bandwidth reduction. Recently it has been shown that splitting scan chains into shorter segments and compacting the shift data outputs into a single parity bit reduces the test response data to one bit per cycle without affecting fault coverage and diagnostic resolution if the compactor's structure is included into the ATPG process.

This test data reduction at the output side comes with challenges at the input side. The bandwidth requirement grows due to the increased number of chains and due to a drastically decreased amount of don't care values in the test patterns.

The paper at hand presents a new iterative approach to test set encoding which optimizes bandwidth on both input and output side while keeping the diagnostic resolution and fault coverage. Experiments with industrial designs demonstrate that test application time, test data volume and diagnostic resolution are improved at the same time and for most designs testing with a bandwidth of three bits per cycle is possible.

BibTeX:
@inproceedings{KochtHEW2009,
  author = {Kochte, Michael A. and Holst, Stefan and Elm, Melanie and Wunderlich, Hans-Joachim},
  title = {{Test Encoding for Extreme Response Compaction}},
  booktitle = {Proceedings of the 14th IEEE European Test Symposium (ETS'09)},
  publisher = {IEEE Computer Society},
  year = {2009},
  pages = {155--160},
  keywords = {Design for Test; Embedded Diagnosis; Response Compaction; Test Compression},
  abstract = {Optimizing bandwidth by compression and compaction always has to solve the trade-off between input bandwidth reduction and output bandwidth reduction. Recently it has been shown that splitting scan chains into shorter segments and compacting the shift data outputs into a single parity bit reduces the test response data to one bit per cycle without affecting fault coverage and diagnostic resolution if the compactor's structure is included into the ATPG process.

This test data reduction at the output side comes with challenges at the input side. The bandwidth requirement grows due to the increased number of chains and due to a drastically decreased amount of don't care values in the test patterns.

The paper at hand presents a new iterative approach to test set encoding which optimizes bandwidth on both input and output side while keeping the diagnostic resolution and fault coverage. Experiments with industrial designs demonstrate that test application time, test data volume and diagnostic resolution are improved at the same time and for most designs testing with a bandwidth of three bits per cycle is possible.},
  url = {http://www.computer.org/csdl/proceedings/ets/2009/3703/00/3703a155-abs.html},
  doi = {http://dx.doi.org/10.1109/ETS.2009.22},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2009/ETS_KochtHEW2009.pdf}
}

147. Concurrent Self-Test with Partially Specified Patterns For Low Test Latency and Overhead
Kochte, M.A., Zoellin, C.G. and Wunderlich, H.-J.
Proceedings of the 14th IEEE European Test Symposium (ETS'09), Sevilla, Spain, 25-29 May 2009, pp. 53-58
2009
DOI URL PDF 
Keywords: BIST; Concurrent self test; test generation
Abstract: Structural on-line self-test may be performed to detect permanent faults and avoid their accumulation. This paper improves concurrent BIST techniques based on a deterministic test set. Here, the test patterns are specially generated with a small number of specified bits. This results in very low test latency, which reduces the likelihood of fault accumulation. Experiments with a large number of circuits show that the hardware overhead is significantly lower than the overhead for previously published methods. Furthermore, the method allows to tradeoff fault coverage, test latency and hardware overhead.
BibTeX:
@inproceedings{KochtZW2009,
  author = {Kochte, Michael A. and Zoellin, Christian G. and Wunderlich, Hans-Joachim},
  title = {{Concurrent Self-Test with Partially Specified Patterns For Low Test Latency and Overhead}},
  booktitle = {Proceedings of the 14th IEEE European Test Symposium (ETS'09)},
  publisher = {IEEE Computer Society},
  year = {2009},
  pages = {53--58},
  keywords = {BIST; Concurrent self test; test generation},
  abstract = {Structural on-line self-test may be performed to detect permanent faults and avoid their accumulation. This paper improves concurrent BIST techniques based on a deterministic test set. Here, the test patterns are specially generated with a small number of specified bits. This results in very low test latency, which reduces the likelihood of fault accumulation. Experiments with a large number of circuits show that the hardware overhead is significantly lower than the overhead for previously published methods. Furthermore, the method allows to tradeoff fault coverage, test latency and hardware overhead.},
  url = {http://www.computer.org/csdl/proceedings/ets/2009/3703/00/3703a053-abs.html},
  doi = {http://dx.doi.org/10.1109/ETS.2009.26},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2009/ETS_KochtZW2009.pdf}
}
146. Restrict Encoding for Mixed-Mode BIST
Hakmi, A.-W., Holst, S., Wunderlich, H.-J., Schlöffel, J., Hapke, F. and Glowatz, A.
Proceedings of the 27th IEEE VLSI Test Symposium (VTS'09), Santa Cruz, California, USA, 3-7 May 2009, pp. 179-184
2009
DOI URL PDF 
Keywords: Deterministic BIST
Abstract: Programmable mixed-mode BIST schemes combine pseudo-random pattern testing and deterministic test. This paper presents a synthesis technique for a mixed-mode BIST scheme which is able to exploit the regularities of a deterministic test pattern set for minimizing the hardware overhead and memory requirements. The scheme saves more than 50% hardware costs compared with the best schemes known so far while complete programmability is still preserved.
BibTeX:
@inproceedings{HakmiHWSHG2009,
  author = {Hakmi, Abdul-Wahid and Holst, Stefan and Wunderlich, Hans-Joachim and Schlöffel, Jürgen and Hapke, Friedrich and Glowatz, Andreas},
  title = {{Restrict Encoding for Mixed-Mode BIST}},
  booktitle = {Proceedings of the 27th IEEE VLSI Test Symposium (VTS'09)},
  publisher = {IEEE Computer Society},
  year = {2009},
  pages = {179--184},
  keywords = {Deterministic BIST},
  abstract = {Programmable mixed-mode BIST schemes combine pseudo-random pattern testing and deterministic test. This paper presents a synthesis technique for a mixed-mode BIST scheme which is able to exploit the regularities of a deterministic test pattern set for minimizing the hardware overhead and memory requirements. The scheme saves more than 50% hardware costs compared with the best schemes known so far while complete programmability is still preserved.},
  url = {http://www.computer.org/csdl/proceedings/vts/2009/3598/00/3598a179-abs.html},
  doi = {http://dx.doi.org/10.1109/VTS.2009.43},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2009/VTS_HakmiHWSHG2009.pdf}
}
145. A Diagnosis Algorithm for Extreme Space Compaction
Holst, S. and Wunderlich, H.-J.
Proceedings of the Conference on Design, Automation and Test in Europe (DATE'09), Nice, France, 20-24 April 2009, pp. 1355-1360
2009
DOI URL PDF 
Keywords: Compaction; Design-for-test; Diagnosis; Embedded diagnosis; Multi-site test
Abstract: During volume testing, test application time, test data volume and high performance automatic test equipment (ATE) are the major cost factors. Embedded testing including built-in self-test (BIST) and multi-site testing are quite effective cost reduction techniques which may make diagnosis more complex. This paper presents a test response compaction scheme and a corresponding diagnosis algorithm which are especially suited for BIST and multi-site testing. The experimental results on industrial designs show that test time and response data volume reduce significantly and the diagnostic resolution even improves with this scheme. A comparison with X-Compact shows that simple parity information provides higher diagnostic resolution per response data bit than more complex signatures.
BibTeX:
@inproceedings{HolstW2009,
  author = {Holst, Stefan and Wunderlich, Hans-Joachim},
  title = {{A Diagnosis Algorithm for Extreme Space Compaction}},
  booktitle = {Proceedings of the Conference on Design, Automation and Test in Europe (DATE'09)},
  publisher = {IEEE Computer Society},
  year = {2009},
  pages = {1355--1360},
  keywords = {Compaction; Design-for-test; Diagnosis; Embedded diagnosis; Multi-site test},
  abstract = {During volume testing, test application time, test data volume and high performance automatic test equipment (ATE) are the major cost factors. Embedded testing including built-in self-test (BIST) and multi-site testing are quite effective cost reduction techniques which may make diagnosis more complex. This paper presents a test response compaction scheme and a corresponding diagnosis algorithm which are especially suited for BIST and multi-site testing. The experimental results on industrial designs show that test time and response data volume reduce significantly and the diagnostic resolution even improves with this scheme. A comparison with X-Compact shows that simple parity information provides higher diagnostic resolution per response data bit than more complex signatures.},
  url = {http://ieeexplore.ieee.org/xpl/freeabs_all.jsp?arnumber=5090875},
  doi = {http://dx.doi.org/10.1109/DATE.2009.5090875},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2009/DATE_HolstW2009.pdf}
}
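Entries 151, 148 and 145 all rely on extreme space compaction: the scan-out bits of many short scan segments are reduced to a single parity bit per shift cycle, and diagnosis then works on the failing cycles. The sketch below is only a behavioural illustration of that compaction step with made-up response data; it models neither the hardware architecture nor the diagnosis algorithm of the papers.

from functools import reduce
from operator import xor

def parity_compact(responses_per_cycle):
    """Compact the scan-out bits of all segments in one shift cycle into a single parity bit."""
    return [reduce(xor, cycle_bits) for cycle_bits in responses_per_cycle]

def failing_cycles(expected, observed):
    """Shift cycles whose observed parity deviates from the fault-free parity."""
    return [c for c, (e, o) in enumerate(zip(expected, observed)) if e != o]

# Fault-free responses of four scan segments over five shift cycles (illustrative values).
good = [[0, 1, 1, 0], [1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 1, 1], [0, 1, 0, 0]]
expected = parity_compact(good)

# A defect flips one captured bit (cycle 2, segment 3); the parity of that cycle changes.
faulty = [row[:] for row in good]
faulty[2][3] ^= 1
print(failing_cycles(expected, parity_compact(faulty)))   # -> [2]

An even number of errors within the same cycle would alias to the expected parity, which is why the papers include the compactor structure in ATPG and in the diagnosis algorithm.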
144. Test Exploration and Validation Using Transaction Level Models
Kochte, M.A., Zoellin, C.G., Imhof, M.E., Salimi Khaligh, R., Radetzki, M., Wunderlich, H.-J., Di Carlo, S. and Prinetto, P.
Proceedings of the Conference on Design, Automation and Test in Europe (DATE'09), Nice, France, 20-24 April 2009, pp. 1250-1253
2009
DOI URL PDF 
Keywords: Test of systems-on-chip; design-for-test, transaction level modeling
Abstract: The complexity of the test infrastructure and test strategies in systems-on-chip approaches the complexity of the functional design space. This paper presents test design space exploration and validation of test strategies and schedules using transaction level models (TLMs). All aspects of the test infrastructure such as test access mechanisms, test wrappers, test data compression and test controllers are modeled at transaction level. Since many aspects of testing involve the transfer of a significant amount of test stimuli and responses, the communication-centric view of TLMs suits this purpose exceptionally well. A case study shows how TLMs can be used to efficiently evaluate DfT decisions in early design steps and how to evaluate test scheduling and resource partitioning during test planning. The presented approach has significantly higher simulation efficiency than RTL and gate level approaches.
BibTeX:
@inproceedings{KochtZISRWDP2009,
  author = {Kochte, Michael A. and Zoellin, Christian G. and Imhof, Michael E. and Salimi Khaligh, Rauf and Radetzki, Martin and Wunderlich, Hans-Joachim and Di Carlo, Stefano and Prinetto, Paolo},
  title = {{Test Exploration and Validation Using Transaction Level Models}},
  booktitle = {Proceedings of the Conference on Design, Automation and Test in Europe (DATE'09)},
  publisher = {IEEE Computer Society},
  year = {2009},
  pages = {1250--1253},
  keywords = {Test of systems-on-chip; design-for-test, transaction level modeling},
  abstract = {The complexity of the test infrastructure and test strategies in systems-on-chip approaches the complexity of the functional design space. This paper presents test design space exploration and validation of test strategies and schedules using transaction level models (TLMs). All aspects of the test infrastructure such as test access mechanisms, test wrappers, test data compression and test controllers are modeled at transaction level. Since many aspects of testing involve the transfer of a significant amount of test stimuli and responses, the communication-centric view of TLMs suits this purpose exceptionally well. A case study shows how TLMs can be used to efficiently evaluate DfT decisions in early design steps and how to evaluate test scheduling and resource partitioning during test planning. The presented approach has significantly higher simulation efficiency than RTL and gate level approaches.},
  url = {http://ieeexplore.ieee.org/xpl/freeabs_all.jsp?arnumber=5090856},
  doi = {http://dx.doi.org/10.1109/DATE.2009.5090856},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2009/DATE_KochtZISRWDP2009.pdf}
}
143. Erkennung von transienten Fehlern in Schaltungen mit reduzierter Verlustleistung;
Detection of transient faults in circuits with reduced power dissipation

Imhof, M.E., Wunderlich, H.-J. and Zoellin, C.G.
2. GMM/GI/ITG-Fachtagung Zuverlässigkeit und Entwurf (ZuE'08)
Vol. 57, Ingolstadt, Germany, 29 September-1 October 2008, pp. 107-114
2008
URL PDF 
Keywords: Robustes Design; Fehlertoleranz; Verlustleistung; Latch; Register; Single Event Effect; Robust design; fault tolerance; power dissipation; latch; register; single event effects
Abstract: Für Speicherfelder sind fehlerkorrigierende Codes die vorherrschende Methode, um akzeptable Fehlerraten zu erreichen. In vielen aktuellen Schaltungen erreicht die Zahl der Speicherelemente in freier Logik die Größenordnung der Zahl von SRAM-Zellen vor wenigen Jahren. Zur Reduktion der Verlustleistung wird häufig der Takt der pegelgesteuerten Speicherelemente unterdrückt und die Speicherelemente müssen ihren Zustand über lange Zeitintervalle halten. Die Notwendigkeit Speicherzellen abzusichern wird zusätzlich durch die Miniaturisierung verstärkt, die zu einer erhöhten Empfindlichkeit der Speicherelemente geführt hat. Dieser Artikel stellt eine Methode zur fehlertoleranten Anordnung von pegelgesteuerten Speicherelementen vor, die bei unterdrücktem Takt Einfachfehler lokalisieren und Mehrfachfehler erkennen kann. Bei aktiviertem Takt können Einfach- und Mehrfachfehler erkannt werden. Die Register können ähnlich wie Prüfpfade effizient in den Entwurfsgang integriert werden. Die Diagnoseinformation kann auf Modulebene leicht berechnet und genutzt werden.

For memories error correcting codes are the method of choice to guarantee acceptable error rates. In many current designs the number of storage elements in random logic reaches the number of SRAM-cells some years ago. Clock-gating is often employed to reduce the power dissipation of level-sensitive storage elements while the elements have to retain their state over long periods of time. The necessity to protect storage elements is amplified by the miniaturization, which leads to an increased susceptibility of the storage elements.
This article proposes a method for the fault-tolerant arrangement of level-sensitive storage elements, which can locate single faults and detect multiple faults while being clock-gated. With active clock single and multiple faults can be detected. The registers can be efficiently integrated similar to the scan design flow. The diagnostic information can be easily computed and used at module level.

BibTeX:
@inproceedings{ImhofWZ2008a,
  author = {Imhof, Michael E. and Wunderlich, Hans-Joachim and Zoellin, Christian G.},
  title = {{Erkennung von transienten Fehlern in Schaltungen mit reduzierter Verlustleistung;
Detection of transient faults in circuits with reduced power dissipation}},
  booktitle = {2. GMM/GI/ITG-Fachtagung Zuverlässigkeit und Entwurf (ZuE'08)},
  publisher = {VDE VERLAG GMBH},
  year = {2008},
  volume = {57},
  pages = {107--114},
  keywords = {Robustes Design; Fehlertoleranz; Verlustleistung; Latch; Register; Single Event Effect; Robust design; fault tolerance; power dissipation; latch; register; single event effects},
  abstract = {Für Speicherfelder sind fehlerkorrigierende Codes die vorherrschende Methode, um akzeptable Fehlerraten zu erreichen. In vielen aktuellen Schaltungen erreicht die Zahl der Speicherelemente in freier Logik die Größenordnung der Zahl von SRAM-Zellen vor wenigen Jahren. Zur Reduktion der Verlustleistung wird häufig der Takt der pegelgesteuerten Speicherelemente unterdrückt und die Speicherelemente müssen ihren Zustand über lange Zeitintervalle halten. Die Notwendigkeit Speicherzellen abzusichern wird zusätzlich durch die Miniaturisierung verstärkt, die zu einer erhöhten Empfindlichkeit der Speicherelemente geführt hat. Dieser Artikel stellt eine Methode zur fehlertoleranten Anordnung von pegelgesteuerten Speicherelementen vor, die bei unterdrücktem Takt Einfachfehler lokalisieren und Mehrfachfehler erkennen kann. Bei aktiviertem Takt können Einfach- und Mehrfachfehler erkannt werden. Die Register können ähnlich wie Prüfpfade effizient in den Entwurfsgang integriert werden. Die Diagnoseinformation kann auf Modulebene leicht berechnet und genutzt werden.

For memories error correcting codes are the method of choice to guarantee acceptable error rates. In many current designs the number of storage elements in random logic reaches the number of SRAM-cells some years ago. Clock-gating is often employed to reduce the power dissipation of level-sensitive storage elements while the elements have to retain their state over long periods of time. The necessity to protect storage elements is amplified by the miniaturization, which leads to an increased susceptibility of the storage elements.
This article proposes a method for the fault-tolerant arrangement of level-sensitive storage elements, which can locate single faults and detect multiple faults while being clock-gated. With active clock single and multiple faults can be detected. The registers can be efficiently integrated similar to the scan design flow. The diagnostic information can be easily computed and used at module level.},
  url = {http://www.vde-verlag.de/proceedings-de/453119017.html},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2008/ZuE_ImhofWZ2008a.pdf}
}

142. Zur Zuverlässigkeitsmodellierung von Hardware-Software-Systemen;
On the Reliability Modeling of Hardware-Software-Systems

Kochte, M.A., Baranowski, R. and Wunderlich, H.-J.
2. GMM/GI/ITG-Fachtagung Zuverlässigkeit und Entwurf (ZuE'08)
Vol. 57, Ingolstadt, Germany, 29 September-1 October 2008, pp. 83-90
2008
URL PDF 
Keywords: Modellierung; Zuverlässigkeit; eingebettete Systeme; System-Level; Systems-on-Chip; Modeling; reliability; embedded systems; system-level; systems-on-chip
Abstract: Zur Zuverlässigkeitsanalyse von Hardware-Software-Systemen ist ein Systemmodell notwendig, welches sowohl Struktur und Architektur der Hardware als auch die ausgeführte Funktion betrachtet. Wird einer dieser Aspekte des Gesamtsystems vernachlässigt, kann sich eine zu optimistische oder zu konservative Schätzung der Zuverlässigkeit ergeben. Ein reines Strukturmodell der Hardware erlaubt, den Einfluss von logischer und struktureller Fehlermaskierung auf die Fehlerhäufigkeit der Hardware zu bestimmen. Allerdings kann ein solches Modell nicht die Fehlerhäufigkeit des Gesamtsystems hinreichend genau schätzen. Die Ausführung der Funktion auf dem System führt zu speziellen Nutzungs- und Kommunikationsmustern der Systemkomponenten, die zu erhöhter oder verminderter Anfälligkeit gegenüber Fehlern führen. Diese Arbeit motiviert die Modellierung funktionaler Aspekte zusammen mit der Struktur des Systems. Mittels Fehlerinjektion und Simulation wird der starke Einfluss der Funktion auf die Fehleranfälligkeit des Systems aufgezeigt. Die vorgestellte Methodik, funktionale Aspekte mit in die Zuverlässigkeitsmodellierung einzubinden, verspricht eine realistischere Bewertung von Hardware-Software-Systemen.

Estimating the reliability of hardware-software systems makes it possible to determine the robustness of design alternatives during design exploration. A system model used to derive such a reliability estimate has to incorporate the hardware structure and architecture of the system as well as the performed function. If merely the functional model or the structural model is considered, separately from the other, reliability estimation may be either too optimistic or too conservative.
While an architectural model allows one to determine the impact of logical and architectural fault masking on the design's error rate, it fails to correctly predict the failure rate of the overall system. The function that is performed by the design exhibits particular usage and communication patterns that may, depending on the function, result in increased or reduced susceptibility to faults.
This work motivates modeling functional aspects together with the architecture of the system. Fault injection and simulation show the strong influence of the function on the susceptibility of the system. The proposed methodology of incorporating functional aspects into the system model for reliability estimation promises a more accurate assessment of hardware-software systems.
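
The fault-injection experiments referenced above are easy to picture with a toy model. The sketch below (all names, the workload, and the single-bit-flip injection are illustrative assumptions, not taken from the paper) estimates how often an injected flip actually becomes visible at the system level, i.e. how much functional masking the executed function provides.

```python
import random

def run_workload(mem, flip=None):
    """Toy workload: accumulate a checksum over a memory image.
    'flip' = (step, word, bit) injects a single bit flip mid-run; this stands in
    for gate- or register-level fault injection in a real flow (an assumption)."""
    acc = 0
    data = list(mem)
    for step in range(len(data)):
        if flip and flip[0] == step:
            data[flip[1]] ^= 1 << flip[2]      # inject a transient bit flip
        acc = (acc + data[step]) & 0xFFFFFFFF
    return acc

def estimate_failure_probability(mem, trials=10_000, seed=0):
    """Fraction of injections that change the system-level result.
    Flips into words that were already consumed are functionally masked."""
    rng = random.Random(seed)
    golden = run_workload(mem)
    failures = 0
    for _ in range(trials):
        flip = (rng.randrange(len(mem)),       # cycle of the strike
                rng.randrange(len(mem)),       # affected word
                rng.randrange(32))             # affected bit
        if run_workload(mem, flip) != golden:
            failures += 1                      # fault visible at system level
    return failures / trials

if __name__ == "__main__":
    memory_image = [i * 2654435761 & 0xFFFFFFFF for i in range(64)]
    print("system-level failure probability:",
          estimate_failure_probability(memory_image))
```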

BibTeX:
@inproceedings{KochtBW2008,
  author = {Kochte, Michael A. and Baranowski, Rafal and Wunderlich, Hans-Joachim},
  title = {{Zur Zuverlässigkeitsmodellierung von Hardware-Software-Systemen;
On the Reliability Modeling of Hardware-Software-Systems}}, booktitle = {2. GMM/GI/ITG-Fachtagung Zuverlässigkeit und Entwurf (ZuE'08)}, publisher = {VDE VERLAG GMBH}, year = {2008}, volume = {57}, pages = {83--90}, keywords = {Modellierung; Zuverlässigkeit; eingebettete Systeme; System-Level; Systems-on-Chip; Modeling; reliability; embedded systems; system-level; systems-on-chip}, abstract = {Zur Zuverlässigkeitsanalyse von Hardware-Software-Systemen ist ein Systemmodell notwendig, welches sowohl Struktur und Architektur der Hardware als auch die ausgeführte Funktion betrachtet. Wird einer dieser Aspekte des Gesamtsystems vernachlässigt, kann sich eine zu optimische oder zu konservative Schätzung der Zuverlässigkeit ergeben. Ein reines Strukturmodell der Hardware erlaubt, den Einfluss von logischer und struktureller Fehlermaskierung auf die Fehlerhäufigkeit der Hardware zu bestimmen. Allerdings kann ein solches Modell nicht die Fehlerhäufigkeit des Gesamtsystems hinreichend genau schätzen. Die Ausführung der Funktion auf dem System führt zu speziellen Nutzungs- und Kommunikationsmustern der Systemkomponenten, die zu erhöhter oder verminderter Anfälligkeit gegenüber Fehlern führen. Diese Arbeit motiviert die Modellierung funktionaler Aspekte zusammen mit der Struktur des Systems. Mittels Fehlerinjektion und Simulation wird der starke Einfluss der Funktion auf die Fehleranfälligkeit des Systems aufgezeigt. Die vorgestellte Methodik, funktionale Aspekte mit in die Zuverlässigkeitsmodellierung einzubinden, verspricht eine realistischere Bewertung von Hardware-Software-Systemen.

Estimating the reliability of hardware-software systems allows to determine the robustness of design alternatives during design exploration. A system model used to derive such a reliability estimate has to incorporate the hardware structure and architecture of the system as well as the performed function. If merely the functional model or the structural model is considered separate from the other one, reliability estimation may be either too optimistic or too conservative.
While an architectural model allows to determine the impact of logical and architectural fault masking on the design's error rate, it fails to correctly predict the failure rate of the overall system. The function that is performed by the design exhibits particular usage and communication patterns that may--depending on the function--result in increased or reduced susceptibility to faults.
This work motivates to model functional aspects together with the architecture of the system. Fault injection and simulation show the strong influence of the function on the susceptability of the system. The proposed methodology to incorporate functional aspects into the system model for reliability estimation promises a more accurate assessment of hardware-software systems.}, url = {http://www.vde-verlag.de/proceedings-de/453119013.html}, file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2008/ZuE_KochtBW2008.pdf} }

141. Integrating Scan Design and Soft Error Correction in Low-Power Applications
Imhof, M.E., Wunderlich, H.-J. and Zoellin, C.G.
Proceedings of the 14th IEEE International On-Line Testing Symposium (IOLTS'08), Rhodes, Greece, 7-9 July 2008, pp. 59-64
2008
DOI URL PDF 
Keywords: Robust design; fault tolerance; latch; low power; register; single event effects
Abstract: Error correcting coding is the dominant technique to achieve acceptable soft-error rates in memory arrays. In many modern circuits, the number of memory elements in the random logic is on the order of the number of SRAM cells on chips only a few years ago. Often latches are clock gated and have to retain their states for longer periods. Moreover, miniaturization has led to elevated susceptibility of the memory elements and further increases the need for protection.
This paper presents a fault-tolerant register latch organization that is able to detect single-bit errors while it is clock gated. With active clock, single and multiple errors are detected. The registers can be efficiently integrated similar to the scan design flow, and error detecting or locating information can be collected at module level. The resulting structure can be efficiently reused for offline and general online testing.
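
The paper's exact latch organization is not reproduced here. A classical way to obtain the stated properties (locate a single error and detect multiple errors in retained state) is a two-dimensional row/column parity arrangement; the sketch below illustrates only that detect/locate behavior and is not claimed to be the register organization used in the paper.

```python
def parity(bits):
    p = 0
    for b in bits:
        p ^= b
    return p

def encode(rows):
    """rows: equal-length bit lists (the clock-gated register contents).
    Returns (row_parities, column_parities) captured when the clock is gated."""
    row_par = [parity(r) for r in rows]
    col_par = [parity(c) for c in zip(*rows)]
    return row_par, col_par

def check(rows, row_par, col_par):
    """Recompute parities and interpret the syndrome:
    exactly one failing row and one failing column -> single error, locatable;
    any other non-empty syndrome -> (multiple) error detected, not located."""
    bad_rows = [i for i, r in enumerate(rows) if parity(r) != row_par[i]]
    bad_cols = [j for j, c in enumerate(zip(*rows)) if parity(c) != col_par[j]]
    if not bad_rows and not bad_cols:
        return "no error", None
    if len(bad_rows) == 1 and len(bad_cols) == 1:
        return "single error located", (bad_rows[0], bad_cols[0])
    return "multiple error detected", None

if __name__ == "__main__":
    state = [[1, 0, 1, 1], [0, 0, 1, 0], [1, 1, 0, 0]]
    rp, cp = encode(state)
    state[1][2] ^= 1                       # one soft error while clock-gated
    print(check(state, rp, cp))            # -> ('single error located', (1, 2))
```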
BibTeX:
@inproceedings{ImhofWZ2008,
  author = {Imhof, Michael E. and Wunderlich, Hans-Joachim and Zoellin, Christian G.},
  title = {{Integrating Scan Design and Soft Error Correction in Low-Power Applications}},
  booktitle = {Proceedings of the 14th IEEE International On-Line Testing Symposium (IOLTS'08)},
  publisher = {IEEE Computer Society},
  year = {2008},
  pages = {59--64},
  keywords = {Robust design; fault tolerance; latch; low power; register; single event effects},
  abstract = {Error correcting coding is the dominant technique to achieve acceptable soft-error rates in memory arrays. In many modern circuits, the number of memory elements in the random logic is in the order of the number of SRAM cells on chips only a few years ago. Often latches are clock gated and have to retain their states during longer periods. Moreover, miniaturization has led to elevated susceptibility of the memory elements and further increases the need for protection. 
This paper presents a fault-tolerant register latch organization that is able to detect single-bit errors while it is clock gated. With active clock, single and multiple errors are detected. The registers can be efficiently integrated similar to the scan design flow, and error detecting or locating information can be collected at module level. The resulting structure can be efficiently reused for offline and general online testing.}, url = {http://www.computer.org/csdl/proceedings/iolts/2008/3264/00/3264a059-abs.html}, doi = {http://dx.doi.org/10.1109/IOLTS.2008.31}, file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2008/IOLTS_ImhofWZ2008.pdf} }
140. Scan Chain Clustering for Test Power Reduction
Elm, M., Wunderlich, H.-J., Imhof, M.E., Zoellin, C.G., Leenstra, J. and Maeding, N.
Proceedings of the 45th ACM/IEEE Design Automation Conference (DAC'08), Anaheim, California, USA, 8-13 June 2008, pp. 828-833
2008
DOI PDF 
Keywords: Test; Design for Test; Low Power; Scan Design
Abstract: An effective technique to save power during scan based test is to switch off unused scan chains. The results obtained with this method strongly depend on the mapping of scan flip-flops into scan chains, which determines how many chains can be deactivated per pattern.
In this paper, a new method to cluster flip-flops into scan chains is presented, which minimizes the power consumption during test. The approach does not specify any ordering inside the chains and fits seamlessly to any standard tool for scan chain integration.

The application of known test power reduction techniques to the optimized scan chain configurations shows significant improvements for large industrial circuits.
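
As a rough illustration of the clustering idea (not the paper's algorithm), the sketch below greedily groups flip-flops into chains so that as many (pattern, chain) pairs as possible remain free of care bits and could therefore be switched off; the care-bit matrix and all names are assumptions.

```python
from typing import List

def cluster_scan_chains(care: List[List[bool]], n_chains: int, chain_len: int):
    """care[p][f] is True if pattern p specifies flip-flop f.
    Greedily assign flip-flops to chains so that many (pattern, chain) pairs
    stay completely unspecified and can be disabled during shift."""
    n_pat = len(care)
    n_ff = len(care[0]) if care else 0
    assert n_chains * chain_len >= n_ff, "not enough chain capacity"
    chains = [[] for _ in range(n_chains)]
    # active[c][p] == True once chain c already holds a care bit of pattern p
    active = [[False] * n_pat for _ in range(n_chains)]

    # Handle the most constrained ("busy") flip-flops first.
    order = sorted(range(n_ff), key=lambda f: -sum(care[p][f] for p in range(n_pat)))
    for f in order:
        best_c, best_cost = None, None
        for c in range(n_chains):
            if len(chains[c]) >= chain_len:
                continue
            # cost = number of patterns for which chain c becomes newly active
            cost = sum(1 for p in range(n_pat) if care[p][f] and not active[c][p])
            if best_cost is None or cost < best_cost:
                best_c, best_cost = c, cost
        chains[best_c].append(f)
        for p in range(n_pat):
            if care[p][f]:
                active[best_c][p] = True

    disabled = sum(1 for c in range(n_chains) for p in range(n_pat) if not active[c][p])
    return chains, disabled  # disabled = (pattern, chain) pairs that can be switched off

if __name__ == "__main__":
    care_bits = [[True, False, False, True],
                 [False, False, True, True],
                 [True, True, False, False]]
    print(cluster_scan_chains(care_bits, n_chains=2, chain_len=2))
```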

BibTeX:
@inproceedings{ElmWIZLM2008,
  author = {Elm, Melanie and Wunderlich, Hans-Joachim and Imhof, Michael E. and Zoellin, Christian G. and Leenstra, Jens and Maeding, Nicolas},
  title = {{Scan Chain Clustering for Test Power Reduction}},
  booktitle = {Proceedings of the 45th ACM/IEEE Design Automation Conference (DAC'08)},
  publisher = {ACM},
  year = {2008},
  pages = {828--833},
  keywords = {Test; Design for Test; Low Power; Scan Design},
  abstract = {An effective technique to save power during scan based test is to switch off unused scan chains. The results obtained with this method strongly depend on the mapping of scan flip-flops into scan chains, which determines how many chains can be deactivated per pattern.
In this paper, a new method to cluster flip-flops into scan chains is presented, which minimizes the power consumption during test. The approach does not specify any ordering inside the chains and fits seamlessly to any standard tool for scan chain integration.

The application of known test power reduction techniques to the optimized scan chain configurations shows significant improvements for large industrial circuits.}, doi = {http://dx.doi.org/10.1145/1391469.1391680}, file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2008/DAC_ElmWIZLM2008.pdf} }

139. Selective Hardening in Early Design Steps
Zoellin, C.G., Wunderlich, H.-J., Polian, I. and Becker, B.
Proceedings of the 13th IEEE European Test Symposium (ETS'08), Lago Maggiore, Italy, 25-29 May 2008, pp. 185-190
2008
DOI URL PDF 
Keywords: Soft error mitigation; reliability
Abstract: Hardening a circuit against soft errors should be performed in early design steps before the circuit is laid out. A viable approach to achieve soft error rate (SER) reduction at a reasonable cost is to harden only parts of a circuit. When selecting which locations in the circuit to harden, priority should be given to critical spots for which an error is likely to cause a system malfunction. The criticality of the spots depends on parameters not all available in early design steps. We employ a selection strategy which takes only gate-level information into account and does not use any low-level electrical or timing information.
We validate the quality of the solution using an accurate SER estimator based on the new UGC particle strike model. Although only partial information is utilized for hardening, the exact validation shows that the susceptibility of a circuit to soft errors is reduced significantly. The results of the hardening strategy presented are also superior to known purely topological strategies in terms of both hardware overhead and protection.
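
A minimal sketch of gate-level candidate selection, assuming a purely structural criticality proxy (the number of observable outputs in a gate's fan-out cone); the selection strategy in the paper is more refined, so this is illustration only.

```python
from collections import defaultdict, deque

def rank_hardening_candidates(netlist, outputs, budget):
    """netlist: dict gate -> list of fan-out gates; outputs: observable sinks.
    Ranks gates by how many observable outputs lie in their fan-out cone,
    a purely structural, gate-level criticality proxy (an assumption)."""
    # reverse graph: gate -> gates driving it
    fanin = defaultdict(list)
    for g, sinks in netlist.items():
        for s in sinks:
            fanin[s].append(g)

    reached_outputs = defaultdict(int)
    for out in outputs:
        seen, queue = {out}, deque([out])
        while queue:
            g = queue.popleft()
            reached_outputs[g] += 1            # g can reach this output
            for d in fanin[g]:
                if d not in seen:
                    seen.add(d)
                    queue.append(d)

    ranked = sorted(netlist, key=lambda g: -reached_outputs[g])
    return ranked[:budget]                     # gates selected for hardening

if __name__ == "__main__":
    nl = {"g1": ["g3"], "g2": ["g3", "g4"], "g3": ["o1"], "g4": ["o2"],
          "o1": [], "o2": []}
    print(rank_hardening_candidates(nl, outputs=["o1", "o2"], budget=2))
```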
BibTeX:
@inproceedings{ZoellWPB2008,
  author = {Zoellin, Christian G. and Wunderlich, Hans-Joachim and Polian, Ilia and Becker, Bernd},
  title = {{Selective Hardening in Early Design Steps}},
  booktitle = {Proceedings of the 13th IEEE European Test Symposium (ETS'08)},
  publisher = {IEEE Computer Society},
  year = {2008},
  pages = {185--190},
  keywords = {Soft error mitigation; reliability},
  abstract = {Hardening a circuit against soft errors should be performed in early design steps before the circuit is laid out. A viable approach to achieve soft error rate (SER) reduction at a reasonable cost is to harden only parts of a circuit. When selecting which locations in the circuit to harden, priority should be given to critical spots for which an error is likely to cause a system malfunction. The criticality of the spots depends on parameters not all available in early design steps. We employ a selection strategy which takes only gate-level information into account and does not use any low-level electrical or timing information. 
We validate the quality of the solution using an accurate SER estimator based on the new UGC particle strike model. Although only partial information is utilized for hardening, the exact validation shows that the susceptibility of a circuit to soft errors is reduced significantly. The results of the hardening strategy presented are also superior to known purely topological strategies in terms of both hardware overhead and protection.}, url = {http://www.computer.org/csdl/proceedings/ets/2008/3150/00/3150a185-abs.html}, doi = {http://dx.doi.org/10.1109/ETS.2008.30}, file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2008/ETS_ZoellWPB2008.pdf} }
138. A Framework for Scheduling Parallel DBMS User-Defined Programs on an Attached High-Performance Computer
Kochte, M.A. and Natarajan, R.
Proceedings of the 2008 conference on Computing frontiers (CF'08), Ischia, Italy, 5-7 May 2008, pp. 97-104
2008
DOI PDF 
Keywords: database accelerators; high-performance computing; parallel user-defined programs
Abstract: We describe a software framework for deploying, scheduling and executing parallel DBMS user-defined programs on an attached high-performance computer (HPC) platform. This framework is advantageous for many DBMS workloads in the following two aspects. First, the long-running user-defined programs can be speeded up by taking advantage of the greater hardware parallelism available on the attached HPC platform. Second, the interactive response time of the remaining applications on the database server platform is improved by the off-loading of long-running user-defined programs to the attached HPC platform. Our framework provides a new approach for integrating high-performance computing into the workflow of query-oriented, computationally-intensive applications.
BibTeX:
@inproceedings{KochtN2008,
  author = {Kochte, Michael A. and Natarajan, Ramesh},
  title = {{A Framework for Scheduling Parallel DBMS User-Defined Programs on an Attached High-Performance Computer}},
  booktitle = {Proceedings of the 2008 conference on Computing frontiers (CF'08)},
  publisher = {ACM},
  year = {2008},
  pages = {97--104},
  keywords = {database accelerators; high-performance computing; parallel user-defined programs},
  abstract = {We describe a software framework for deploying, scheduling and executing parallel DBMS user-defined programs on an attached high-performance computer (HPC) platform. This framework is advantageous for many DBMS workloads in the following two aspects. First, the long-running user-defined programs can be speeded up by taking advantage of the greater hardware parallelism available on the attached HPC platform. Second, the interactive response time of the remaining applications on the database server platform is improved by the off-loading of long-running user-defined programs to the attached HPC platform. Our framework provides a new approach for integrating high-performance computing into the workflow of query-oriented, computationally-intensive applications.},
  doi = {http://dx.doi.org/10.1145/1366230.1366245},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2008/KochtN2008.pdf}
}
137. Signature Rollback – A Technique for Testing Robust Circuits
Amgalan, U., Hachmann, C., Hellebrand, S. and Wunderlich, H.-J.
Proceedings of the 26th IEEE VLSI Test Symposium (VTS'08), San Diego, California, USA, 27 April-1 May 2008, pp. 125-130
2008
DOI URL PDF 
Keywords: Embedded Test; Robust Design; Rollback and Recovery; Test Quality and Reliability; Time Redundancy
Abstract: Dealing with static and dynamic parameter variations has become a major challenge for design and test. To avoid unnecessary yield loss and to ensure reliable system operation a robust design has become mandatory. However, standard structural test procedures still address classical fault models and cannot deal with the non-deterministic behavior caused by parameter variations and other reasons. Chips may be rejected, even if the test reveals only non-critical failures that could be compensated during system operation. This paper introduces a scheme for embedded test, which can distinguish critical permanent and noncritical transient failures for circuits with time redundancy. To minimize both yield loss and the overall test time, the scheme relies on partitioning the test into shorter sessions. If a faulty signature is observed at the end of a session, a rollback is triggered, and this particular session is repeated. An analytical model for the expected overall test time provides guidelines to determine the optimal parameters of the scheme.
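
The expected-test-time trade-off can be illustrated with a small toy model: assuming transients hit each pattern independently and a disturbed session is simply re-run until its signature is clean, the expected overall test time as a function of the number of sessions looks as follows (an illustrative model, not necessarily the analytical model derived in the paper).

```python
def expected_test_time(total_patterns, sessions, lam, session_overhead=0):
    """Expected test time for a test partitioned into equal sessions with a
    signature check and rollback after each session. 'lam' is the assumed
    per-pattern probability of a transient disturbance."""
    per_session = total_patterns / sessions
    p_disturbed = 1.0 - (1.0 - lam) ** per_session   # session sees >= 1 transient
    expected_runs = 1.0 / (1.0 - p_disturbed)        # geometric number of tries
    return sessions * (per_session + session_overhead) * expected_runs

if __name__ == "__main__":
    # sweep the number of sessions to expose the trade-off the paper optimizes
    for k in (1, 2, 4, 8, 16, 32):
        t = expected_test_time(total_patterns=10_000, sessions=k,
                               lam=1e-4, session_overhead=50)
        print(f"{k:2d} sessions -> expected {t:9.1f} pattern slots")
```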
BibTeX:
@inproceedings{AmgalHHW2008,
  author = {Amgalan, Uranmandakh and Hachmann, Christian and Hellebrand, Sybille and Wunderlich, Hans-Joachim},
  title = {{Signature Rollback – A Technique for Testing Robust Circuits}},
  booktitle = {Proceedings of the 26th IEEE VLSI Test Symposium (VTS'08)},
  publisher = {IEEE Computer Society},
  year = {2008},
  pages = {125--130},
  keywords = {Embedded Test; Robust Design; Rollback and Recovery; Test Quality and Reliability; Time Redundancy},
  abstract = {Dealing with static and dynamic parameter variations has become a major challenge for design and test. To avoid unnecessary yield loss and to ensure reliable system operation a robust design has become mandatory. However, standard structural test procedures still address classical fault models and cannot deal with the non-deterministic behavior caused by parameter variations and other reasons. Chips may be rejected, even if the test reveals only non-critical failures that could be compensated during system operation. This paper introduces a scheme for embedded test, which can distinguish critical permanent and noncritical transient failures for circuits with time redundancy. To minimize both yield loss and the overall test time, the scheme relies on partitioning the test into shorter sessions. If a faulty signature is observed at the end of a session, a rollback is triggered, and this particular session is repeated. An analytical model for the expected overall test time provides guidelines to determine the optimal parameters of the scheme.},
  url = {http://www.computer.org/csdl/proceedings/vts/2008/3123/00/3123a125-abs.html},
  doi = {http://dx.doi.org/10.1109/VTS.2008.34},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2008/VTS_AmgalHHW2008.pdf}
}
136. Scan Chain Organization for Embedded Diagnosis
Elm, M. and Wunderlich, H.-J.
Proceedings of the 11th Conference on Design, Automation and Test in Europe (DATE'08), Munich, Germany, 10-14 March 2008, pp. 468-473
2008
DOI URL PDF 
Keywords: design for diagnosis; embedded test; scan design
Abstract: Keeping diagnostic resolution as high as possible while maximizing the compaction ratio has been a subject of research since the advent of embedded test. In this paper, we present a novel scan design methodology to maximize diagnostic resolution when compaction is employed. The essential idea is to consider the diagnostic resolution during the clustering of scan elements to scan chains. Our methodology does not depend on a fault model and is helpful with any type of compactor.
A linear time heuristic is presented to solve the scan chain clustering problem. We evaluate our approach for industrial and academic benchmark circuits. It turns out to be superior to both random and to layout driven scan chain clustering. The methodology is applicable to any gate-level design and fits smoothly into an industrial design flow.
BibTeX:
@inproceedings{ElmW2008,
  author = {Elm, Melanie and Wunderlich, Hans-Joachim},
  title = {{Scan Chain Organization for Embedded Diagnosis}},
  booktitle = {Proceedings of the 11th Conference on Design, Automation and Test in Europe (DATE'08)},
  publisher = {IEEE Computer Society},
  year = {2008},
  pages = {468--473},
  keywords = {design for diagnosis; embedded test; scan design},
  abstract = {Keeping diagnostic resolution as high as possible while maximizing the compaction ratio is subject to research since the advent of embedded test. In this paper, we present a novel scan design methodology to maximize diagnostic resolution when compaction is employed. The essential idea is to consider the diagnostic resolution during the clustering of scan elements to scan chains. Our methodology does not depend on a fault model and is helpful with any type of compactor.
A linear time heuristic is presented to solve the scan chain clustering problem. We evaluate our approach for industrial and academic benchmark circuits. It turns out to be superior to both random and to layout driven scan chain clustering. The methodology is applicable to any gate-level design and fits smoothly into an industrial design flow.}, url = {http://www.computer.org/csdl/proceedings/date/2008/8013/00/04484725-abs.html}, doi = {http://dx.doi.org/10.1109/DATE.2008.4484725}, file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2008/DATE_ElmW2008.pdf} }
135. Test Set Stripping Limiting the Maximum Number of Specified Bits
Kochte, M.A., Zoellin, C.G., Imhof, M.E. and Wunderlich, H.-J.
Proceedings of the 4th IEEE International Symposium on Electronic Design, Test and Applications (DELTA'08), Hong Kong, China, 23-25 January 2008, pp. 581-586
Best paper award
2008
DOI URL PDF 
Keywords: test relaxation; test generation; tailored ATPG
Abstract: This paper presents a technique that limits the maximum number of specified bits of any pattern in a given test set. The outlined method uses algorithms similar to ATPG, but exploits the information in the test set to quickly find test patterns with the desired properties. The resulting test sets show a significant reduction in the maximum number of specified bits in the test patterns. Furthermore, results for commercial ATPG test sets show that even the overall number of specified bits is reduced substantially.
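
A hypothetical sketch of such a relaxation step, with a stand-in detection oracle in place of fault simulation (all names are assumptions): bits are unspecified greedily as long as every fault assigned to the pattern remains detected, until the care-bit limit is met.

```python
def strip_pattern(pattern, assigned_faults, detects, max_specified):
    """pattern: dict bit_position -> 0/1 (specified bits only).
    detects(pattern, fault) -> bool is an oracle standing in for fault
    simulation / ATPG reasoning (an assumption of this sketch).
    Greedily unspecify bits while every assigned fault stays detected,
    stopping once the pattern is within 'max_specified' care bits."""
    relaxed = dict(pattern)
    for bit in sorted(pattern):                      # deterministic order
        if len(relaxed) <= max_specified:
            break
        trial = {b: v for b, v in relaxed.items() if b != bit}
        if all(detects(trial, f) for f in assigned_faults):
            relaxed = trial                          # this bit was not needed
    return relaxed

if __name__ == "__main__":
    # toy oracle: a "fault" is detected if a required subset of bits is present
    faults = {"f1": {0: 1, 3: 0}, "f2": {5: 1}}
    def detects(p, f):
        return all(b in p and p[b] == v for b, v in faults[f].items())
    pat = {0: 1, 1: 0, 2: 1, 3: 0, 4: 1, 5: 1, 6: 0}
    print(strip_pattern(pat, ["f1", "f2"], detects, max_specified=3))
```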
BibTeX:
@inproceedings{KochtZIW2008,
  author = {Kochte, Michael A. and Zoellin, Christian G. and Imhof, Michael E. and Wunderlich, Hans-Joachim},
  title = {{Test Set Stripping Limiting the Maximum Number of Specified Bits}},
  booktitle = {Proceedings of the 4th IEEE International Symposium on Electronic Design, Test and Applications (DELTA'08)},
  publisher = {IEEE Computer Society},
  year = {2008},
  pages = {581--586},
  keywords = {test relaxation; test generation; tailored ATPG},
  abstract = {This paper presents a technique that limits the maximum number of specified bits of any pattern in a given test set. The outlined method uses algorithms similar to ATPG, but exploits the information in the test set to quickly find test patterns with the desired properties. The resulting test sets show a significant reduction in the maximum number of specified bits in the test patterns. Furthermore, results for commercial ATPG test sets show that even the overall number of specified bits is reduced substantially},
  url = {http://www.computer.org/csdl/proceedings/delta/2008/3110/00/3110a581-abs.html},
  doi = {http://dx.doi.org/10.1109/DELTA.2008.64},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2008/DELTA_KochtZIW2008.pdf}
}
134. Testing and Monitoring Nanoscale Systems - Challenges and Strategies for Advanced Quality Assurance
Hellebrand, S., Zoellin, C.G., Wunderlich, H.-J., Ludwig, S., Coym, T. and Straube, B.
Informacije MIDEM
Vol. 37(4(124)), December 2007, pp. 212-219
2007
URL PDF 
Abstract: The increased number of fabrication defects, spatial and temporal variability of parameters, as well as the growing impact of soft errors in nanoelectronic systems require a paradigm shift in design, verification and test. A robust design becomes mandatory to ensure dependable systems and acceptable yields. Design robustness, however, invalidates many traditional approaches for testing and implies enormous challenges. The RealTest Project addresses these problems for nanoscale CMOS and targets unified design and test strategies to support both a robust design and a coordinated quality assurance after manufacturing and during the lifetime of a system. The paper first gives a short overview of the research activities within the project and then focuses on a first result concerning soft errors in combinational logic. It will be shown that common electrical models for particle strikes in random logic have underestimated the effects on the system behavior. The refined model developed within the RealTest Project predicts about twice as many single event upsets (SEUs) caused by particle strikes as traditional models.
BibTeX:
@article{HelleZWLCS2007b,
  author = {Hellebrand, Sybille and Zoellin, Christian G. and Wunderlich, Hans-Joachim and Ludwig, Stefan and Coym, Torsten and Straube, Bernd},
  title = {{Testing and Monitoring Nanoscale Systems - Challenges and Strategies for Advanced Quality Assurance}},
  journal = {Informacije MIDEM},
  publisher = {MIDEM},
  year = {2007},
  volume = {37},
  number = {4(124)},
  pages = {212--219},
  abstract = {The increased number of fabrication defects, spatial and temporal variability of parameters, as well as the growing impact of soft errors in nanoelectronic systems require a paradigm shift in design, verification and test. A robust design becomes mandatory to ensure dependable systems and acceptable yields. Design robustness, however, invalidates many traditional approaches for testing and implies enormous challenges. The RealTest Project addresses these problems for nanoscale CMOS and targets unified design and test strategies to support both a robust design and a coordinated quality assurance after manufacturing and during the lifetime of a system. The paper first gives a short overview of the research activities within the project and then focuses on a first result concerning soft errors in combinational logic. It will be shown that common electrical models for particle strikes in random logic have underestimated the effects on the system behavior. The refined model developed within the RealTest Project predicts about twice as many single events upsets (SEUs) caused by particle strikes as traditional models.},
  url = {http://www.midem-drustvo.si/journal.htm},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2007/MIDEM_HelleZWLCS2007a.pdf}
}
133. Debug and Diagnosis: Mastering the Life Cycle of Nano-Scale Systems on Chip
Wunderlich, H.-J., Elm, M. and Holst, S.
Informacije MIDEM
Vol. 37(4(124)), December 2007, pp. 235-243
2007
URL PDF 
Keywords: Diagnosis; Debug; Embedded Test
Abstract: Rising design complexity and shrinking structures pose new challenges for debug and diagnosis. Finding bugs and defects quickly during the whole life cycle of a product is crucial for time to market, time to volume and improved product quality. Debug of design errors and diagnosis of defects have many common aspects. In this paper we give an overview of state of the art algorithms, which tackle both tasks, and present an adaptive approach to design debug and logic diagnosis.

Special design for diagnosis is needed to maintain visibility of internal states and diagnosability of deeply embedded cores. This article discusses current approaches to design for diagnosis to support all debug tasks from first silicon to the system level.

BibTeX:
@article{WundeEH2007a,
  author = {Wunderlich, Hans-Joachim and Elm, Melanie and Holst, Stefan},
  title = {{Debug and Diagnosis: Mastering the Life Cycle of Nano-Scale Systems on Chip}},
  journal = {Informacije MIDEM},
  publisher = {MIDEM},
  year = {2007},
  volume = {37},
  number = {4(124)},
  pages = {235--243},
  keywords = {Diagnosis; Debug; Embedded Test},
  abstract = {Rising design complexity and shrinking structures pose new challenges for debug and diagnosis. Finding bugs and defects quickly during the whole life cycle of a product is crucial for time to market, time to volume and improved product quality. Debug of design errors and diagnosis of defects have many common aspects. In this paper we give an overview of state of the art algorithms, which tackle both tasks, and present an adaptive approach to design debug and logic diagnosis.

Special design for diagnosis is needed to maintain visibility of internal states and diagnosability of deeply embedded cores. This article discusses current approaches to design for diagnosis to support all debug tasks from first silicon to the system level.}, url = {http://www.midem-drustvo.si/journal.htm}, file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2007/MIDEM_WundeEH2007.pdf} }

132. Academic Network for Microelectronic Test Education
Novak, F., Biasizzo, A., Bertrand, Y., Flottes, M.-L., Balado, L., Figueras, J., Di Carlo, S., Prinetto, P., Pricopi, N., Wunderlich, H.-J. and Van Der Heyden, J.P.
The International Journal of Engineering Education
Vol. 23(6), November 2007, pp. 1245-1253
2007
URL PDF 
Keywords: microelectronic circuit test; remote on-line test; digital test; mixed-signal test; memory test, automatic test equipment; test education
Abstract: This paper is an overview of the activities performed in the framework of the European IST project EuNICE-Test (European Network for Initial and Continuing Education in VLSI/SOC Testing) using remote automatic test equipment (ATE), addressing the shortage of skills in the microelectronics industry in the field of electronic testing. The project was based on the experience of the common test resource centre (CRTC) for French universities. In the framework of the EuNICE-Test project, the existing network expanded to include 4 new academic centres: Universitat Politecnica de Catalunya, Spain, Politecnico di Torino, Italy, University of Stuttgart, Germany and Jozef Stefan Institute Ljubljana, Slovenia. Assessments of the results achieved are presented as well as course topics and possible future extensions.
BibTeX:
@article{NovakBBFBFDPPWV2007,
  author = {Novak, Frank and Biasizzo, Anton and Bertrand, Yves and Flottes, Marie-Lise and Balado, Luz and Figueras, Joan and Di Carlo, Stefano and Prinetto, Paolo and Pricopi, Nicoleta and Wunderlich, Hans-Joachim and Van Der Heyden, Jean Pierre},
  title = {{Academic Network for Microelectronic Test Education}},
  journal = {The International Journal of Engineering Education},
  publisher = {International Journal of Engineering Education},
  year = {2007},
  volume = {23},
  number = {6},
  pages = {1245--1253},
  keywords = {microelectronic circuit test; remote on-line test; digital test; mixed-signal test; memory test, automatic test equipment; test education},
  abstract = {This paper is an overview of the activities performed in the framework of the European IST project EuNICE-Test (European Network for Initial and Continuing Education in VLSI/SOC Testing) using remote automatic test equipment (ATE) ), addressing the shortage of skills in the microelectronics industry in the field of electronic testing. The project was based on the experience of the common test resource centre (CRTC) for French universities. In the framework of the EuNICE-Test project, the existing network expanded to 4 new academic centres: Universitat Politecnica de Catalunya, Spain, Politecnico di Torino, Italy, University of Stuttgart, Germany and Jozef Stefan Institute Ljubljana, Slovenia. Assessments of the results achieved are presented as well as course topics and possible future extensions.},
  url = {http://www.ingentaconnect.com/content/intjee/ijee/2007/00000023/00000006/art00021},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2007/NovakBBFBFCPPWH2007.pdf}
}
131. Programmable Deterministic Built-in Self-test
Hakmi, A.-W., Wunderlich, H.-J., Zoellin, C.G., Glowatz, A., Hapke, F., Schloeffel, J. and Souef, L.
Proceedings of the International Test Conference (ITC'07), Santa Clara, California, USA, 21-25 October 2007, pp. 1-9
2007
DOI PDF 
Keywords: Deterministic BIST, Test data compression
Abstract: In this paper, we propose a new programmable deterministic Built-In Self-Test (BIST) method that requires significantly lower storage for deterministic patterns than existing programmable methods and provides high flexibility for test engineering in both internal and external test.
Theoretical analysis suggests that significantly more care bits can be encoded in the seed of a Linear Feedback Shift Register (LFSR), if a limited number of conflicting equations is ignored in the employed linear equation system. The ignored care bits are separately embedded into the LFSR pattern. In contrast to known deterministic BIST schemes based on test set embedding, the embedding logic function is not hardwired. Instead, this information is stored in memory using a special compression and decompression method. Experiments for benchmark circuits and industrial designs demonstrate that the approach has considerably higher overall coding efficiency than the existing methods.
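
The seed computation reduces to solving a linear system over GF(2) in the seed bits; the sketch below performs that Gaussian elimination and, instead of failing on conflicts, records the conflicting equations so that the corresponding care bits could be embedded separately. The LFSR expansion itself and all names are illustrative assumptions.

```python
def solve_seed(equations):
    """equations: list of (mask, rhs); the set bits of 'mask' name the seed
    variables XOR-ed together, 'rhs' is the required care-bit value.
    Gaussian elimination over GF(2). Inconsistent equations are recorded
    instead of aborting, mirroring the idea of tolerating a few conflicting
    care bits (the LFSR machinery is not modeled here)."""
    rows = {}                 # pivot position -> (mask, rhs); pivot = highest set bit
    ignored = []
    for idx, (mask, rhs) in enumerate(equations):
        m, r = mask, rhs
        while m:
            p = m.bit_length() - 1
            if p not in rows:
                rows[p] = (m, r)
                break
            pm, pr = rows[p]
            m ^= pm
            r ^= pr
        else:
            if r:
                ignored.append(idx)           # conflicting care bit
    # back-substitution: free variables default to 0, low pivots fixed first
    seed = 0
    for p in sorted(rows):
        m, r = rows[p]
        par = bin(seed & m & ~(1 << p)).count("1") & 1
        if r ^ par:
            seed |= 1 << p
    return seed, ignored

if __name__ == "__main__":
    eqs = [(0b0011, 1), (0b0110, 0), (0b0101, 1), (0b0011, 0)]  # last one conflicts
    seed, skipped = solve_seed(eqs)
    print(f"seed = {seed:04b}, ignored equations = {skipped}")
```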
BibTeX:
@inproceedings{HakmiWZGHSS2007,
  author = {Hakmi, Abdul-Wahid and Wunderlich, Hans-Joachim and Zoellin, Christian G. and Glowatz, Andreas and Hapke, Friedrich and Schloeffel, Juergen and Souef, Laurent},
  title = {{Programmable Deterministic Built-in Self-test}},
  booktitle = {Proceedings of the International Test Conference (ITC'07)},
  publisher = {IEEE Computer Society},
  year = {2007},
  pages = {1--9},
  keywords = {Deterministic BIST, Test data compression},
  abstract = {In this paper, we propose a new programmable deterministic Built-In Self-Test (BIST) method that requires significantly lower storage for deterministic patterns than existing programmable methods and provides high flexibility for test engineering in both internal and external test. 
Theoretical analysis suggests that significantly more care bits can be encoded in the seed of a Linear Feedback Shift Register (LFSR), if a limited number of conflicting equations is ignored in the employed linear equation system. The ignored care bits are separately embedded into the LFSR pattern. In contrast to known deterministic BIST schemes based on test set embedding, the embedding logic function is not hardwired. Instead, this information is stored in memory using a special compression and decompression method. Experiments for benchmark circuits and industrial designs demonstrate that the approach has considerably higher overall coding efficiency than the existing methods.}, doi = {http://dx.doi.org/10.1109/TEST.2007.4437611}, file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2007/ITC_HakmiWZGHSS2007.pdf} }
130. A Refined Electrical Model for Particle Strikes and its Impact on SEU Prediction
Hellebrand, S., Zoellin, C.G., Wunderlich, H.-J., Ludwig, S., Coym, T. and Straube, B.
Proceedings of the 22nd IEEE International Symposium on Defect and Fault Tolerance in VLSI Systems (DFT'07), Rome, Italy, 26-28 September 2007, pp. 50-58
2007
DOI URL PDF 
Abstract: Decreasing feature sizes have led to an increased vulnerability of random logic to soft errors. In combinational logic a particle strike may lead to a glitch at the output of a gate, also referred to as a single event transient (SET), which in turn can propagate to a register and cause a single event upset (SEU) there.
Circuit level modeling and analysis of SETs provides an attractive compromise between computationally expensive simulations at device level and less accurate techniques at higher levels. At the circuit level particle strikes crossing a pn-junction are traditionally modeled with the help of a transient current source. However, the common models assume a constant voltage across the pn-junction, which may lead to inaccurate predictions concerning the shape of expected glitches. To overcome this problem, a refined circuit level model for strikes through pn-junctions is investigated and validated in this paper. The refined model yields significantly different results than common models. This has a considerable impact on SEU prediction, which is confirmed by extensive simulations at gate level. In most cases, the refined, more realistic, model reveals an almost doubled risk of a system failure after an SET.
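
For reference, the traditional circuit-level model injects a transient current source, commonly shaped as a double-exponential pulse; the sketch below shows that classical pulse. This is the textbook model that the paper refines, not the refined model itself, and the numbers are illustrative only.

```python
import math

def double_exponential_current(t, q, tau_a, tau_b):
    """Classical double-exponential current pulse for a particle strike:
    I(t) = Q / (tau_a - tau_b) * (exp(-t/tau_a) - exp(-t/tau_b)),
    with collected charge q in coulombs and time constants in seconds."""
    if t < 0:
        return 0.0
    return q / (tau_a - tau_b) * (math.exp(-t / tau_a) - math.exp(-t / tau_b))

if __name__ == "__main__":
    # illustrative values: 50 fC collected charge, 200 ps / 50 ps time constants
    Q, TA, TB = 50e-15, 200e-12, 50e-12
    for t_ps in (0, 25, 50, 100, 200, 400, 800):
        i = double_exponential_current(t_ps * 1e-12, Q, TA, TB)
        print(f"t = {t_ps:4d} ps  I = {i * 1e6:8.2f} uA")
```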
BibTeX:
@inproceedings{HelleZWLCS2007,
  author = {Hellebrand, Sybille and Zoellin, Christian G. and Wunderlich, Hans-Joachim and Ludwig, Stefan and Coym, Torsten and Straube, Bernd},
  title = {{A Refined Electrical Model for Particle Strikes and its Impact on SEU Prediction}},
  booktitle = {Proceedings of the 22nd IEEE International Symposium on Defect and Fault Tolerance in VLSI Systems (DFT'07)},
  publisher = {IEEE Computer Society},
  year = {2007},
  pages = {50--58},
  abstract = {Decreasing feature sizes have led to an increased vulnerability of random logic to soft errors. In combinational logic a particle strike may lead to a glitch at the output of a gate, also referred to as single even transient (SET), which in turn can propagate to a register and cause a single event upset (SEU) there. 
Circuit level modeling and analysis of SETs provides an attractive compromise between computationally expensive simulations at device level and less accurate techniques at higher levels. At the circuit level particle strikes crossing a pn-junction are traditionally modeled with the help of a transient current source. However, the common models assume a constant voltage across the pn-junction, which may lead to inaccurate predictions concerning the shape of expected glitches. To overcome this problem, a refined circuit level model for strikes through pnjunctions is investigated and validated in this paper. The refined model yields significantly different results than common models. This has a considerable impact on SEU prediction, which is confirmed by extensive simulations at gate level. In most cases, the refined, more realistic, model reveals an almost doubled risk of a system failure after an SET.}, url = {http://www.computer.org/csdl/proceedings/dft/2007/2885/00/28850050-abs.html}, doi = {http://dx.doi.org/10.1109/DFT.2007.43}, file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2007/DFT_HelleZWLCS2007.pdf} }
129. Testing and Monitoring Nanoscale Systems - Challenges and Strategies for Advanced Quality Assurance (Invited Paper)
Hellebrand, S., Zoellin, C.G., Wunderlich, H.-J., Ludwig, S., Coym, T. and Straube, B.
Proceedings of 43rd International Conference on Microelectronics, Devices and Material with the Workshop on Electronic Testing (MIDEM'07), Bled, Slovenia, 12-14 September 2007, pp. 3-10
2007
PDF 
Abstract: The increased number of fabrication defects, spatial and temporal variability of parameters, as well as the growing impact of soft errors in nanoelectronic systems require a paradigm shift in design, verification and test. A robust design becomes mandatory to ensure dependable systems and acceptable yields. Design robustness, however, invalidates many traditional approaches for testing and implies enormous challenges. The RealTest Project addresses these problems for nanoscale CMOS and targets unified design and test strategies to support both a robust design and a coordinated quality assurance after manufacturing and during the lifetime of a system. The paper first gives a short overview of the research activities within the project and then focuses on a first result concerning soft errors in combinational logic. It will be shown that common electrical models for particle strikes in random logic have underestimated the effects on the system behavior. The refined model developed within the RealTest Project predicts about twice as many single event upsets (SEUs) caused by particle strikes as traditional models.
BibTeX:
@inproceedings{HelleZWLCS2007a,
  author = {Hellebrand, Sybille and Zoellin, Christian G. and Wunderlich, Hans-Joachim and Ludwig, Stefan and Coym, Torsten and Straube, Bernd},
  title = {{Testing and Monitoring Nanoscale Systems - Challenges and Strategies for Advanced Quality Assurance (Invited Paper)}},
  booktitle = {Proceedings of 43rd International Conference on Microelectronics, Devices and Material with the Workshop on Electronic Testing (MIDEM'07)},
  publisher = {MIDEM},
  year = {2007},
  pages = {3--10},
  abstract = {The increased number of fabrication defects, spatial and temporal variability of parameters, as well as the growing impact of soft errors in nanoelectronic systems require a paradigm shift in design, verification and test. A robust design becomes mandatory to ensure dependable systems and acceptable yields. Design robustness, however, invalidates many traditional approaches for testing and implies enormous challenges. The RealTest Project addresses these problems for nanoscale CMOS and targets unified design and test strategies to support both a robust design and a coordinated quality assurance after manufacturing and during the lifetime of a system. The paper first gives a short overview of the research activities within the project and then focuses on a first result concerning soft errors in combinational logic. It will be shown that common electrical models for particle strikes in random logic have underestimated the effects on the system behavior. The refined model developed within the RealTest Project predicts about twice as many single events upsets (SEUs) caused by particle strikes as traditional models.},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2007/MIDEM_HelleZWLCS2007a.pdf}
}
128. Debug and Diagnosis: Mastering the Life Cycle of Nano-Scale Systems on Chip (Invited Paper)
Wunderlich, H.-J., Elm, M. and Holst, S.
Proceedings of 43rd International Conference on Microelectronics, Devices and Material with the Workshop on Electronic Testing (MIDEM'07), Bled, Slovenia, 12-14 September 2007, pp. 27-36
2007
PDF 
Keywords: Diagnosis; Debug; Embedded Test
Abstract: Rising design complexity and shrinking structures pose new challenges for debug and diagnosis. Finding bugs and defects quickly during the whole life cycle of a product is crucial for time to market, time to volume and improved product quality. Debug of design errors and diagnosis of defects have many common aspects. In this paper we give an overview of state of the art algorithms, which tackle both tasks, and present an adaptive approach to design debug and logic diagnosis.

Special design for diagnosis is needed to maintain visibility of internal states and diagnosability of deeply embedded cores. This article discusses current approaches to design for diagnosis to support all debug tasks from first silicon to the system level.

BibTeX:
@inproceedings{WundeEH2007,
  author = {Wunderlich, Hans-Joachim and Elm, Melanie and Holst, Stefan},
  title = {{Debug and Diagnosis: Mastering the Life Cycle of Nano-Scale Systems on Chip (Invited Paper)}},
  booktitle = {Proceedings of 43rd International Conference on Microelectronics, Devices and Material with the Workshop on Electronic Testing (MIDEM'07)},
  publisher = {MIDEM},
  year = {2007},
  pages = {27--36},
  keywords = {Diagnosis; Debug; Embedded Test},
  abstract = {Rising design complexity and shrinking structures pose new challenges for debug and diagnosis. Finding bugs and defects quickly during the whole life cycle of a product is crucial for time to market, time to volume and improved product quality. Debug of design errors and diagnosis of defects have many common aspects. In this paper we give an overview of state of the art algorithms, which tackle both tasks, and present an adaptive approach to design debug and logic diagnosis.

Special design for diagnosis is needed to maintain visibility of internal states and diagnosability of deeply embedded cores. This article discusses current approaches to design for diagnosis to support all debug tasks from first silicon to the system level.}, file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2007/MIDEM_WundeEH2007.pdf} }

127. Scan Test Planning for Power Reduction
Imhof, M.E., Zoellin, C.G., Wunderlich, H.-J., Maeding, N. and Leenstra, J.
Proceedings of the 44th ACM/IEEE Design Automation Conference (DAC'07), San Diego, California, USA, 4-8 June 2007, pp. 521-526
2007
DOI URL PDF 
Keywords: Test planning, power during test
Abstract: Many STUMPS architectures found in current chip designs allow disabling of individual scan chains for debug and diagnosis. In a recent paper it has been shown that this feature can be used for reducing the power consumption during test. Here, we present an efficient algorithm for the automated generation of a test plan that keeps fault coverage as well as test time, while significantly reducing the amount of wasted energy. A fault isolation table, which is usually used for diagnosis and debug, is employed to accurately determine scan chains that can be disabled. The algorithm was successfully applied to large industrial circuits and identifies a very large amount of excess pattern shift activity.
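
A toy version of the planning step (all data structures and names are assumptions): given the faults each pattern has to detect and, from the fault isolation table, the chains in which each fault is observed, chains are disabled per pattern as long as every required fault stays observable in at least one enabled chain.

```python
def plan_chain_gating(pattern_faults, isolation, n_chains):
    """pattern_faults: pattern -> set of faults this pattern must detect.
    isolation: fault -> set of chains observing that fault (fault isolation table).
    For every pattern, greedily disable chains while each required fault is
    still observed in at least one enabled chain."""
    plan = {}
    for pat, faults in pattern_faults.items():
        enabled = set(range(n_chains))
        for c in range(n_chains):
            trial = enabled - {c}
            if all(isolation[f] & trial for f in faults):
                enabled = trial                     # chain c is not needed here
        plan[pat] = sorted(enabled)                 # chains kept active for 'pat'
    return plan

if __name__ == "__main__":
    iso = {"f1": {0}, "f2": {0, 2}, "f3": {1, 2}}
    pats = {"p1": {"f1", "f2"}, "p2": {"f3"}}
    print(plan_chain_gating(pats, iso, n_chains=3))
    # p1 only needs chain 0; p2 only needs one of chains 1 or 2
```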
BibTeX:
@inproceedings{ImhofZWML2007a,
  author = {Imhof, Michael E. and Zoellin, Christian G. and Wunderlich, Hans-Joachim and Maeding, Nicolas and Leenstra, Jens},
  title = {{Scan Test Planning for Power Reduction}},
  booktitle = {Proceedings of the 44th ACM/IEEE Design Automation Conference (DAC'07)},
  publisher = {ACM},
  year = {2007},
  pages = {521--526},
  keywords = {Test planning, power during test},
  abstract = {Many STUMPS architectures found in current chip designs allow disabling of individual scan chains for debug and diagnosis. In a recent paper it has been shown that this feature can be used for reducing the power consumption during test. Here, we present an efficient algorithm for the automated generation of a test plan that keeps fault coverage as well as test time, while significantly reducing the amount of wasted energy. A fault isolation table, which is usually used for diagnosis and debug, is employed to accurately determine scan chains that can be disabled. The algorithm was successfully applied to large industrial circuits and identifies a very large amount of excess pattern shift activity.},
  url = {http://ieeexplore.ieee.org/xpl/freeabs_all.jsp?arnumber=4261239},
  doi = {http://dx.doi.org/10.1145/1278480.1278614},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2007/DAC_ImhofZWML2007a.pdf}
}
126. Adaptive Debug and Diagnosis Without Fault Dictionaries
Holst, S. and Wunderlich, H.-J.
Proceedings of the 12th IEEE European Test Symposium (ETS'07), Freiburg, Germany, 20-24 May 2007, pp. 7-12
Best paper award
2007
DOI URL PDF 
Keywords: Diagnosis; Debug; Test; VLSI
Abstract: Diagnosis is essential in modern chip production to increase yield, and debug constitutes a major part in the presilicon development process. For recent process technologies, defect mechanisms are increasingly complex, and continuous efforts are made to model these defects by using sophisticated fault models. Traditional static approaches for debug and diagnosis with a simplified fault model are more and more limited.
In this paper, a method is presented, which identifies possible faulty regions in a combinational circuit, based on its input/output behavior and independent of a fault model. The new adaptive, statistical approach combines a flexible and powerful effect-cause pattern analysis algorithm with high-resolution ATPG. We show the effectiveness of the approach through experiments with benchmark and industrial circuits.
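
A much-simplified evidence-counting sketch of effect-cause candidate ranking (the scoring below is a coarse stand-in, not the paper's algorithm): candidates are scored by how many failing patterns they explain and penalized for predicted fails that actually passed and for failing patterns left unexplained.

```python
def rank_candidates(failing, passing, explains):
    """failing/passing: sets of pattern ids observed on the tester.
    explains[cand]: set of patterns for which a defect at candidate 'cand'
    would produce a failing response (e.g. obtained by fault simulation)."""
    scores = {}
    for cand, pats in explains.items():
        sigma = len(pats & failing)          # failing patterns explained
        iota = len(pats & passing)           # predicted fails that passed
        tau = len(failing - pats)            # failing patterns left unexplained
        scores[cand] = (sigma - iota - tau, sigma, iota, tau)
    return sorted(scores.items(), key=lambda kv: kv[1], reverse=True)

if __name__ == "__main__":
    fails, passes = {1, 4, 7}, {2, 3, 5, 6}
    cand_model = {"net_a": {1, 4, 7, 6}, "net_b": {1, 4}, "net_c": {2, 5}}
    for cand, score in rank_candidates(fails, passes, cand_model):
        print(cand, score)                   # best-explaining candidate first
```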
BibTeX:
@inproceedings{HolstW2007,
  author = {Holst, Stefan and Wunderlich, Hans-Joachim},
  title = {{Adaptive Debug and Diagnosis Without Fault Dictionaries}},
  booktitle = {Proceedings of the 12th IEEE European Test Symposium (ETS'07)},
  publisher = {IEEE Computer Society},
  year = {2007},
  pages = {7--12},
  keywords = {Diagnosis; Debug; Test; VLSI},
  abstract = {Diagnosis is essential in modern chip production to increase yield, and debug constitutes a major part in the presilicon development process. For recent process technologies, defect mechanisms are increasingly complex, and continuous efforts are made to model these defects by using sophisticated fault models. Traditional static approaches for debug and diagnosis with a simplified fault model are more and more limited. 
In this paper, a method is presented, which identifies possible faulty regions in a combinational circiut, based on its input/output behavior and independent of a fault model. The new adaptive, statistical approach combines a flexible and powerful effect-cause pattern analysis algorithm with high-resolution ATPG. We show the effectiveness of the approach through experiments with benchmark and industrial circuits.}, url = {http://www.computer.org/csdl/proceedings/ets/2007/2827/00/28270007-abs.html}, doi = {http://dx.doi.org/10.1109/ETS.2007.9}, file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2007/ETS_HolstW2007.pdf} }
125. An Integrated Built-in Test and Repair Approach for Memories with 2D Redundancy
Öhler, P., Hellebrand, S. and Wunderlich, H.-J.
Proceedings of the 12th IEEE European Test Symposium (ETS'07), Freiburg, Germany, 20-24 May 2007, pp. 91-96
2007
DOI URL PDF 
Abstract: An efficient on-chip infrastructure for memory test and repair is crucial to enhance yield and availability of SoCs. A commonly used repair strategy is to equip memories with spare rows and columns (2D redundancy). Although exact algorithms are available for offline repair analysis, they cannot be directly applied on-chip because of the prohibitive storage requirements for failure bitmaps and the complex data structures inherent in the algorithms. Existing heuristics for built-in repair analysis (BIRA) try to circumvent this problem either by very simple search strategies or by restricting the search to smaller local bitmaps. Exact BIRA algorithms work with sub analyzers for each possible repair combination. While a parallel implementation suffers from a high hardware overhead, a serial implementation leads to high test times. The integrated built-in test and repair approach proposed in this paper interleaves test and repair analysis and supports an exact solution without failure bitmap. The search can be implemented with a stack, which is limited by the number of redundant elements. The basic search procedure is combined with an efficient technique to continuously reduce the problem complexity and keep the test and analysis time low.
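
The stack-bounded depth-first search over repair decisions can be sketched as follows (recursion stands in for the on-chip stack, and the interleaving with the memory test itself is omitted): every uncovered fail address is repaired either by a spare row or a spare column, with backtracking when spares run out.

```python
def find_repair(faults, spare_rows, spare_cols):
    """faults: list of (row, col) fail addresses as a March test would find them.
    Depth-first search over repair decisions; returns (rows_to_replace,
    cols_to_replace) or None if the memory is unrepairable. A sketch of the
    search idea only, not the on-chip implementation."""
    def dfs(i, used_rows, used_cols):
        # skip faults already covered by a spare chosen earlier
        while i < len(faults) and (faults[i][0] in used_rows or faults[i][1] in used_cols):
            i += 1
        if i == len(faults):
            return used_rows, used_cols                    # all faults repaired
        r, c = faults[i]
        if len(used_rows) < spare_rows:                    # branch 1: spend a spare row
            sol = dfs(i + 1, used_rows | {r}, used_cols)
            if sol:
                return sol
        if len(used_cols) < spare_cols:                    # branch 2: spend a spare column
            sol = dfs(i + 1, used_rows, used_cols | {c})
            if sol:
                return sol
        return None                                        # backtrack

    return dfs(0, frozenset(), frozenset())

if __name__ == "__main__":
    fails = [(2, 5), (2, 9), (7, 5), (4, 1)]
    print(find_repair(fails, spare_rows=1, spare_cols=2))
```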
BibTeX:
@inproceedings{OehleHW2007,
  author = {Öhler, Phillip and Hellebrand, Sybille and Wunderlich, Hans-Joachim},
  title = {{An Integrated Built-in Test and Repair Approach for Memories with 2D Redundancy}},
  booktitle = {Proceedings of the 12th IEEE European Test Symposium (ETS'07)},
  publisher = {IEEE Computer Society},
  year = {2007},
  pages = {91--96},
  abstract = {An efficient on-chip infrastructure for memory test and repair is crucial to enhance yield and availability of SoCs. A commonly used repair strategy is to equip memories with spare rows and columns (2D redundancy). Although exact algorithms are available for offline repair analysis, they cannot be directly applied on-chip because of the prohibitive storage requirements for failture bitmaps and the complex data structures inherent in the algorithms. Existing heuristics for built-in repair analysis (BIRA) try to circumvent this problem either by very simple search strategies or by restricting the search to smaller local bitmaps. Exact BIRA algorithms work with sub analyzers for each possible repair combination. While a parallel implementation suffers from a high hardware overhead, a serial implementation leads to high test times. The integrated built-in test and repair approach proposed in this paper interleaves test and repair analysis and supports an exact solution without failure bitmap. The search can be implemented with a stack, which is limited by the number of redundant elements. The basic search procedure is combined with an efficient technique to continuously reduce the problem complexity and keep the test and analysis time low.},
  url = {http://www.computer.org/csdl/proceedings/ets/2007/2827/00/28270091-abs.html},
  doi = {http://dx.doi.org/10.1109/ETS.2007.10},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2007/ETS_OehleHW2007.pdf}
}
124. Deterministic Logic BIST for Transition Fault Testing
Gherman, V., Wunderlich, H.-J., Schloeffel, J. and Garbers, M.
IET Computers & Digital Techniques
Vol. 1(3), May 2007, pp. 180-186
2007
DOI URL PDF 
Keywords: Deterministic logic BIST; delay test generation; transition faults
Abstract: BIST is an attractive approach to detect delay faults due to its inherent support for at-speed test. Deterministic logic BIST (DLBIST) is a technique which was successfully applied to stuck-at fault testing. As delay faults have lower random pattern testability than stuck-at faults, the need for DLBIST schemes is increased. Nevertheless, an extension to delay fault testing is not trivial, since this necessitates the application of pattern pairs. Consequently, delay fault testing is expected to require a larger mapping effort and logic overhead than stuck-at fault testing.
In this paper, we consider the so-called transition fault model, which is widely used for complexity reasons. We present an extension of a DLBIST scheme for transition fault testing. Functional justification has been used to generate the required pattern pairs. The efficiency of the extended scheme is investigated by using difficult to test industrial designs.
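
Functional justification means that the second vector of each pattern pair is produced by the circuit itself as its response to the first vector (broadside application), so only the first vector needs to be generated and encoded. The tiny sketch below illustrates this with a made-up next-state function; everything about the circuit is an assumption.

```python
def broadside_pair(scan_in, next_state):
    """Functional justification (broadside / launch-on-capture): V1 is scanned
    in, V2 = next_state(V1) is produced by the circuit in the launch cycle.
    'next_state' stands in for the sequential circuit under test."""
    v1 = tuple(scan_in)
    v2 = next_state(v1)
    return v1, v2          # pattern pair applied at speed to launch and capture a transition

if __name__ == "__main__":
    # toy 4-bit circular shifter with XOR feedback as the "circuit"
    def toy_next_state(s):
        return (s[3] ^ s[0], s[0], s[1], s[2])
    print(broadside_pair([1, 0, 1, 1], toy_next_state))
```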
BibTeX:
@article{GhermWSG2007,
  author = {Gherman, Valentin and Wunderlich, Hans-Joachim and Schloeffel, Juergen and Garbers, Michael},
  title = {{Deterministic Logic BIST for Transition Fault Testing}},
  journal = {IET Computers & Digital Techniques},
  publisher = {Institution of Engineering and Technology},
  year = {2007},
  volume = {1},
  number = {3},
  pages = {180--186},
  keywords = {Deterministic logic BIST; delay test generation; transition faults},
  abstract = {BIST is an attractive approach to detect delay faults due to its inherent support for at-speed test. Deterministic logic BIST (DLBIST) is a technique which was successfully applied to stuck-at fault testing. As delay faults have lower random pattern testability than stuck-at faults, the need for DLBIST schemes is increased. Nevertheless, an extension to delay fault testing is not trivial, since this necessitates the application of pattern pairs. Consequently, delay fault testing is expected to require a larger mapping effort and logic overhead than stuck-at fault testing. 
In this paper, we consider the so-called transition fault model, which is widely used for complexity reasons. We present an extension of a DLBIST scheme for transition fault testing. Functional justification has been used to generate the required pattern pairs. The efficiency of the extended scheme is investigated by using difficult to test industrial designs.},
  url = {http://ieeexplore.ieee.org/xpl/freeabs_all.jsp?arnumber=4205033},
  doi = {http://digital-library.theiet.org/content/journals/10.1049/iet-cdt_20060131},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2007/GhermWSG2007.pdf}
}
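The "functional justification" mentioned in entry 124 above means that the second vector of a transition-fault pattern pair is not shifted in separately but produced by the circuit itself: one functional clock applied to the first vector yields the second. A small Python sketch of that idea follows; the toy next-state function is purely illustrative and stands in for a real gate-level simulation.

# Hedged sketch: v2 of a (launch, capture) pair is the circuit's next-state
# function applied to v1 (functional justification).

def next_state_counter(state):
    """Toy next-state function: interpret the bits as a 4-bit binary counter."""
    value = int(''.join(map(str, state)), 2)
    return [int(b) for b in format((value + 1) % 16, '04b')]

def functional_pattern_pair(v1, next_state):
    """Return the pattern pair (v1, v2) with v2 justified functionally."""
    return v1, next_state(v1)

if __name__ == "__main__":
    v1 = [0, 1, 1, 1]
    print(functional_pattern_pair(v1, next_state_counter))   # ([0, 1, 1, 1], [1, 0, 0, 0])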
123. Analyzing Test and Repair Times for 2D Integrated Memory Built-in Test and Repair
Öhler, P., Hellebrand, S. and Wunderlich, H.-J.
Proceedings of the 10th IEEE Workshop on Design and Diagnostics of Electronic Circuits and Systems (DDECS'07), Krakow, Poland, 11-13 April 2007, pp. 185-190
Best paper award
2007
DOI URL PDF 
Abstract: An efficient on-chip infrastructure for memory test and repair is crucial to enhance yield and availability of SoCs. A commonly used repair strategy is to equip memories with spare rows and columns (2D redundancy). To avoid the prohibitive storage requirements for failure bitmaps and the complex data structures inherent in most algorithms for offline repair analysis, existing heuristics for built-in repair analysis (BIRA) either use very simple search strategies or restrict the search to smaller local bitmaps. Exact BIRA algorithms work with sub-analyzers for each possible repair combination. While a parallel implementation suffers from a high hardware overhead, a serial implementation leads to increased test times. Recently an integrated built-in test and repair approach has been proposed which interleaves test and repair analysis and supports an exact solution with moderate hardware overhead and reasonable test times. The search is based on a depth-first traversal of a binary tree, which can be efficiently implemented using a stack of limited size. This algorithm can be realized with different repair strategies guiding the selection of spare rows or columns in each step. In this paper the impact of four different repair strategies on the test and repair time is analyzed.
BibTeX:
@inproceedings{OehleHW2007a,
  author = {Öhler, Phillip and Hellebrand, Sybille and Wunderlich, Hans-Joachim},
  title = {{Analyzing Test and Repair Times for 2D Integrated Memory Built-in Test and Repair}},
  booktitle = {Proceedings of the 10th IEEE Workshop on Design and Diagnostics of Electronic Circuits and Systems (DDECS'07)},
  publisher = {IEEE Computer Society},
  year = {2007},
  pages = {185--190},
  abstract = {An efficient on-chip infrastructure for memory test and repair is crucial to enhance yield and availability of SoCs. A commonly used repair strategy is to equip memories with spare rows and columns (2D redundancy). To avoid the prohibitive storage requirements for failure bitmaps and the complex data structures inherent in most algorithms for offline repair analysis, existing heuristics for built-in repair analysis (BIRA) either use very simple search strategies or restrict the search to smaller local bitmaps. Exact BIRA algorithms work with sub-analyzers for each possible repair combination. While a parallel implementation suffers from a high hardware overhead, a serial implementation leads to increased test times. Recently an integrated built-in test and repair approach has been proposed which interleaves test and repair analysis and supports an exact solution with moderate hardware overhead and reasonable test times. The search is based on a depth-first traversal of a binary tree, which can be efficiently implemented using a stack of limited size. This algorithm can be realized with different repair strategies guiding the selection of spare rows or columns in each step. In this paper the impact of four different repair strategies on the test and repair time is analyzed.},
  url = {http://www.computer.org/csdl/proceedings/ddecs/2007/1161/00/04295278-abs.html},
  doi = {http://dx.doi.org/10.1109/DDECS.2007.4295278},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2007/DDECS_OehleHW2007a.pdf}
}
122. Test und Zuverlässigkeit nanoelektronischer Systeme
Becker, B., Polian, I., Hellebrand, S., Straube, B. and Wunderlich, H.-J.
1. GMM/GI/ITG-Fachtagung Zuverlässigkeit und Entwurf (ZuE'07)
Vol. 52, Munich, Germany, 26-28 March 2007, pp. 139-140
2007
URL PDF 
Abstract: In addition to the growing susceptibility to manufacturing defects, increased parameter variations, time-dependent material changes, and a higher susceptibility to disturbances during operation pose massive problems for the quality assurance of nanoelectronic systems. For economical production and reliable system operation, a robust design becomes indispensable on the one hand, while on the other hand this also requires a paradigm shift in testing. Instead of merely detecting and sorting out defective systems, the test must determine whether a system is still functional despite a certain number of faults and must characterize the remaining robustness against disturbances during operation. Within the RealTest project, unified design and test strategies are being developed that support both a robust design and a quality assurance approach tailored to it.
BibTeX:
@inproceedings{BeckeHSW2007,
  author = {Becker, Bernd and Polian, Ilia and Hellebrand, Sybille and Straube, Bernd and Wunderlich, Hans-Joachim},
  title = {{Test und Zuverlässigkeit nanoelektronischer Systeme}},
  booktitle = {1. GMM/GI/ITG-Fachtagung Zuverlässigkeit und Entwurf (ZuE'07)},
  publisher = {VDE VERLAG GMBH},
  year = {2007},
  volume = {52},
  pages = {139--140},
  abstract = {Neben der zunehmenden Anfälligkeit gegenüber Fertigungsfehlern bereiten insbesondere vermehrte Parameterschwankungen, zeitabhängige Materialveränderungen und eine erhöhte Störanfälligkeit während des Betriebs massive Probleme bei der Qualitätssicherung für nanoelektronische Systeme. Für eine wirtschaftliche Produktion und einen zuverlässigen Systembetrieb wird einerseits ein robuster Entwurf unabdingbar, andererseits ist damit auch ein Paradigmenwechsel beim Test erforderlich. Anstatt lediglich defektbehaftete Systeme zu erkennen und auszusortieren, muss der Test bestimmen, ob ein System trotz einer gewissen Menge von Fehlern funktionsfähig ist, und die verbleibende Robustheit gegenüber Störungen im Betrieb charakterisieren. Im Rahmen des Projekts RealTest werden einheitliche Entwurfs- und Teststrategien entwickelt, die sowohl einen robusten Entwurf als auch eine darauf abgestimmte Qualitätssicherung unterstützen.},
  url = {http://www.vde-verlag.de/proceedings-de/463023018.html},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2007/ZuE_BeckeHSW2007.pdf}
}
121. Verlustleistungsoptimierende Testplanung zur Steigerung von Zuverlässigkeit und Ausbeute
Imhof, M.E., Zöllin, C.G., Wunderlich, H.-J., Mäding, N. and Leenstra, J.
1. GMM/GI/ITG-Fachtagung Zuverlässigkeit und Entwurf (ZuE'07)
Vol. 52, Munich, Germany, 26-28 March 2007, pp. 69-76
2007
URL PDF 
Abstract: The strongly increased average and peak power consumption during the test of integrated circuits can impair production yield as well as reliability in later operation. We present a test planning approach for circuits with parallel scan chains that reduces the power consumption during test. The test planning is mapped to a covering problem that can be solved efficiently with a heuristic procedure, even for large circuits. The efficiency of the presented method is demonstrated both for the well-known benchmark circuits and for large industrial circuits.
BibTeX:
@inproceedings{ImhofZWML2007,
  author = {Imhof, Michael E. and Zöllin, Christian G. and Wunderlich, Hans-Joachim and Mäding, Nicolas and Leenstra, Jens},
  title = {{Verlustleistungsoptimierende Testplanung zur Steigerung von Zuverlässigkeit und Ausbeute}},
  booktitle = {1. GMM/GI/ITG-Fachtagung Zuverlässigkeit und Entwurf (ZuE'07)},
  publisher = {VDE VERLAG GMBH},
  year = {2007},
  volume = {52},
  pages = {69--76},
  abstract = {Die stark erhöhte durchschnittliche und maximale Verlustleistung während des Tests integrierter Schaltungen kann zu einer Beeinträchtigung der Ausbeute bei der Produktion sowie der Zuverlässigkeit im späteren Betrieb führen. Wir stellen eine Testplanung für Schaltungen mit parallelen Prüfpfaden vor, welche die Verlustleistung während des Tests reduziert. Die Testplanung wird auf ein überdeckungsproblem abgebildet, das mit einem heuristischen Lösungsverfahren effizient auch für große Schaltungen gelöst werden kann. Die Effizienz des vorgestellten Verfahrens wird sowohl für die bekannten Benchmarkschaltungen als auch für große industrielle Schaltungen demonstriert.},
  url = {http://www.vde-verlag.de/proceedings-de/463023008.html},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2007/ZuE_ImhofZWML2007.pdf}
}
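The abstract of entry 121 above maps test planning to a covering problem solved heuristically. A classic greedy set-cover heuristic of that flavour, shown below, picks in each step the test session that covers the most still-undetected faults; this is only an illustration of the covering formulation, not necessarily the paper's exact procedure, and all identifiers are ours.

# Hedged sketch of a greedy covering heuristic for test planning.

def greedy_cover(universe, candidates):
    """universe: set of faults; candidates: dict name -> set of faults it detects."""
    uncovered, plan = set(universe), []
    while uncovered:
        # Pick the candidate session that detects the most still-uncovered faults.
        best = max(candidates, key=lambda name: len(candidates[name] & uncovered))
        if not candidates[best] & uncovered:
            raise ValueError("remaining faults cannot be covered by any session")
        plan.append(best)
        uncovered -= candidates[best]
    return plan

if __name__ == "__main__":
    faults = {1, 2, 3, 4, 5, 6}
    sessions = {"A": {1, 2, 3}, "B": {3, 4}, "C": {4, 5, 6}, "D": {1, 6}}
    print(greedy_cover(faults, sessions))   # e.g. ['A', 'C']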
120. Domänenübergreifende Zuverlässigkeitsbewertung in frühen Entwicklungsphasen unter Berücksichtigung von Wechselwirkungen
Wedel, M., Göhner, P., Gäng, J., Bertsche, B., Arnaout, T. and Wunderlich, H.-J.
5. Paderborner Workshop "Entwurf mechatronischer Systeme"
Vol. 210, Paderborn, Germany, 22-23 March 2007, pp. 257-272
2007
PDF 
Keywords: Reliability, Testing, and Fault-Tolerance (CR B.8.1); Zuverlässigkeitsbewertung mechatronischer Systeme; frühe Entwicklungsphasen; domänenübergreifende Wechselwirkungen; quantitative und qualitative Methoden
Abstract: Because the information available about a mechatronic system is incomplete, early reliability assessment poses a major challenge. To exploit their respective advantages, classical approaches from the individual domains were combined and integrated into a holistic method for reliability assessment in the early development phases. In cooperation between different engineering disciplines, this holistic method was extended by computer-aided identification of fault dependencies as part of a risk assessment and by several qualitative modeling and analysis approaches. For the systematic analysis of the mutual influence of the involved domains and its integration into the reliability assessment, interactions between the domains were investigated and classified.
BibTeX:
@inproceedings{WedelGGBAW2007,
  author = {Wedel, Michael and Göhner, Peter and Gäng, Jochen and Bertsche, Bernd and Arnaout, Talal and Wunderlich, Hans-Joachim},
  title = {{Domänenübergreifende Zuverlässigkeitsbewertung in frühen Entwicklungsphasen unter Berücksichtigung von Wechselwirkungen}},
  booktitle = {5. Paderborner Workshop "Entwurf mechatronischer Systeme"},
  publisher = {HNI Verlag, Paderborn},
  year = {2007},
  volume = {210},
  pages = {257--272},
  keywords = {Reliability, Testing, and Fault-Tolerance (CR B.8.1); Zuverlässigkeitsbewertung mechatronischer Systeme; frühe Entwicklungsphasen; domänenübergreifende Wechselwirkungen; quantitative und qualitative Methoden},
  abstract = {Aufgrund der unvollständigen Informationen über ein mechatronisches System stellt die Frühe Zuverlässigkeitsbewertung eine große Herausforderung dar. Um die jeweiligen Vorteile zu nutzen, wurden klassische Ansätze in den einzelnen Domänen kombiniert und in eine ganzheitliche Methode zur Zuverlässigkeitsbewertung in den Frühen Entwicklungsphasen integriert. In Zusammenarbeit verschiedener Ingenieursdisziplinen wurde die ganzheitliche Methode um die rechnergestützte Ermittlung von Fehlerzusammenhängen im Rahmen einer Risikoabschätzung und verschiedene qualitative Modellierungs- und Analyseansätze erweitert. für die systematische Analyse des wechselseitigen Einflusses der beteiligten Domänen und die Integration in die Zuverlässigkeitsbewertung wurden Wechselwirkungen zwischen den Domänen untersucht und klassifiziert.},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2007/EMS_WedelGGBAW2007.pdf}
}
119. Synthesis of Irregular Combinational Functions with Large Don't Care Sets
Gherman, V., Wunderlich, H.-J., Mascarenhas, R., Schloeffel, J. and Garbers, M.
Proceedings of the 17th ACM Great Lakes Symposium on VLSI (GLSVLSI'07), Stresa - Lago Maggiore, Italy, 11-13 March 2007, pp. 287-292
2007
DOI PDF 
Keywords: logic synthesis; incompletely specified functions
Abstract: A special logic synthesis problem is considered for Boolean functions which have large don't care sets and are irregular. Here, a function is considered as irregular if the input assignments mapped to specified values ('1' or '0') are randomly spread over the definition space. Such functions can be encountered in the field of design for test. The proposed method uses ordered BDDs for logic manipulations and generates free-BDD-like covers. For the considered benchmark functions, implementations were found with a significant reduction of the node/gate count as compared to SIS or the methods offered by a state-of-the-art BDD package.
BibTeX:
@inproceedings{GhermWMSM2007,
  author = {Gherman, Valentin and Wunderlich, Hans-Joachim and Mascarenhas, Rio and Schloeffel, Juergen and Garbers, Michael},
  title = {{Synthesis of Irregular Combinational Functions with Large Don't Care Sets}},
  booktitle = {Proceedings of the 17th ACM Great Lakes Symposium on VLSI (GLSVLSI'07)},
  publisher = {ACM},
  year = {2007},
  pages = {287--292},
  keywords = {logic synthesis; incompletely specified functions},
  abstract = {A special logic synthesis problem is considered for Boolean functions which have large don't care sets and are irregular. Here, a function is considered as irregular if the input assignments mapped to specified values ('1' or '0') are randomly spread over the definition space. Such functions can be encountered in the field of design for test. The proposed method uses ordered BDDs for logic manipulations and generates free-BDD-like covers. For the considered benchmark functions, implementations were found with a significant reduction of the node/gate count as compared to SIS or the methods offered by a state-of-the-art BDD package.},
  doi = {http://dx.doi.org/10.1145/1228784.1228856},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2007/GLSVLSI_GhermWMSM2007.pdf}
}
118. BIST Power Reduction Using Scan-Chain Disable in the Cell Processor
Zoellin, C., Wunderlich, H.-J., Maeding, N. and Leenstra, J.
Proceedings of the International Test Conference (ITC'06), Santa Clara, California, USA, 22-27 October 2006, pp. 1-8
2006
DOI PDF 
Keywords: microprocessor test; BIST; low power test.
Abstract: Built-in self test is a major part of the manufacturing test procedure for the Cell Processor. However, pseudo random patterns cause a high switching activity which is not effectively reduced by standard low power design techniques. If special care is not taken, the scan-speed may have to be reduced significantly, thus extending test time and costs.
In this paper, we describe a test power reduction method for logic BIST which uses test scheduling, planning and scan-gating. In LBIST, effective patterns that detect additional faults are very scarce after a few dozen scan cycles and often less than one pattern in a hundred detects new faults. In most cases, such an effective pattern requires only a reduced set of the available scan chains to detect the fault and all don't-care scan chains can be disabled, therefore significantly reducing test power.
BibTeX:
@inproceedings{ZoellWML2006,
  author = {Zoellin, Christian and Wunderlich, Hans-Joachim and Maeding, Nicolas and Leenstra, Jens},
  title = {{BIST Power Reduction Using Scan-Chain Disable in the Cell Processor}},
  booktitle = {Proceedings of the International Test Conference (ITC'06)},
  publisher = {IEEE},
  year = {2006},
  pages = {1--8},
  keywords = {microprocessor test; BIST; low power test.},
  abstract = {Built-in self test is a major part of the manufacturing test procedure for the Cell Processor. However, pseudo random patterns cause a high switching activity which is not effectively reduced by standard low power design techniques. If special care is not taken, the scan-speed may have to be reduced significantly, thus extending test time and costs. 
In this paper, we describe a test power reduction method for logic BIST which uses test scheduling, planning and scan-gating. In LBIST, effective patterns that detect additional faults are very scarce after a few dozen scan cycles and often less than one pattern in a hundred detects new faults. In most cases, such an effective pattern requires only a reduced set of the available scan chains to detect the fault and all don't-care scan chains can be disabled, therefore significantly reducing test power.},
  doi = {http://dx.doi.org/10.1109/TEST.2006.297695},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2006/ITC_ZoellWML2006.pdf}
}
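The scan-gating idea in entry 118 above is that an effective pseudo-random pattern usually needs care bits in only a few scan chains, so the remaining chains can stay clock-gated. The short Python sketch below illustrates just the chain-selection step under that simplification; data layout and names are illustrative, not the paper's.

# Hedged sketch: enable only the scan chains that carry care bits of an effective pattern.

def chains_to_enable(care_bits, chain_of_cell):
    """care_bits: iterable of scan-cell indices needed by the pattern;
       chain_of_cell: dict mapping scan-cell index -> scan-chain id."""
    return {chain_of_cell[cell] for cell in care_bits}

def gating_mask(num_chains, enabled):
    """One enable flag per chain; disabled chains keep their clocks gated."""
    return [chain in enabled for chain in range(num_chains)]

if __name__ == "__main__":
    chain_of_cell = {0: 0, 1: 0, 2: 1, 3: 1, 4: 2, 5: 3}
    enabled = chains_to_enable(care_bits=[1, 4], chain_of_cell=chain_of_cell)
    print(gating_mask(num_chains=4, enabled=enabled))   # [True, False, True, False]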
117. Structural-based Power-aware Assignment of Don't Cares for Peak Power Reduction during Scan Testing
Badereddine, N., Girard, P., Pravossoudovitch, S., Landrault, C., Arnaud, V. and Wunderlich, H.-J.
Proceedings of the IFIP International Conference on Very Large Scale Integration (VLSI-SoC), Nice, France, 16-18 October 2006, pp. 403-408
2006
DOI PDF 
Abstract: Scan architectures, though widely used in modern designs for testing purposes, are expensive in power consumption. In this paper, we first discuss the issues of excessive peak power consumption during scan testing. We next show that taking care of high current levels during the test cycle (i.e. between launch and capture) is highly relevant so as to avoid noise phenomena such as IR-drop or ground bounce. Then, we propose a solution based on power-aware assignment of don't care bits in deterministic test patterns that considers structural information of the circuit under test. Experiments have been performed on ISCAS'89 and ITC'99 benchmark circuits with the proposed structural-based power-aware X-Filling technique. These results show that the proposed technique provides the best tradeoff between peak power reduction and increase of test sequence length.
BibTeX:
@inproceedings{BaderGPLAW2006,
  author = {Badereddine, Nabil and Girard, Patrick and Pravossoudovitch, Serge and Landrault, Christian and Arnaud, Virazel and Wunderlich, Hans-Joachim},
  title = {{Structural-based Power-aware Assignment of Don't Cares for Peak Power Reduction during Scan Testing}},
  booktitle = {Proceedings of the IFIP International Conference on Very Large Scale Integration (VLSI-SoC)},
  publisher = {IEEE},
  year = {2006},
  pages = {403--408},
  abstract = {Scan architectures, though widely used in modern designs for testing purposes, are expensive in power consumption. In this paper, we first discuss the issues of excessive peak power consumption during scan testing. We next show that taking care of high current levels during the test cycle (i.e. between launch and capture) is highly relevant so as to avoid noise phenomena such as IR-drop or ground bounce. Then, we propose a solution based on power-aware assignment of don't care bits in deterministic test patterns that considers structural information of the circuit under test. Experiments have been performed on ISCAS'89 and ITC'99 benchmark circuits with the proposed structural-based power-aware X-Filling technique. These results show that the proposed technique provides the best tradeoff between peak power reduction and increase of test sequence length.},
  doi = {http://dx.doi.org/10.1109/VLSISOC.2006.313222},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2006/VLSI-SOC_BaderGPLAW2006.pdf}
}
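Entry 117 above (and the related DTIS'06 paper, entry 115 below) assigns the don't-care (X) bits of deterministic test patterns so as to reduce power. A very simple heuristic in that spirit is "adjacent fill", where each X inherits the value of the nearest preceding care bit, keeping the number of transitions shifted through the scan chain low. The sketch below only illustrates this generic baseline; the papers use more elaborate, structure-aware assignments.

# Hedged sketch of a generic adjacent-fill heuristic for X bits in a test pattern.

def adjacent_fill(pattern):
    """pattern: string over '0', '1', 'X'; returns a fully specified string."""
    filled, last = [], '0'          # assume '0' before the first care bit
    for bit in pattern:
        if bit in '01':
            last = bit
        filled.append(last)
    return ''.join(filled)

if __name__ == "__main__":
    print(adjacent_fill("X1XX0XX1X"))   # -> "011100111"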
116. DFG-Projekt RealTest - Test und Zuverlässigkeit nanoelektronischer Systeme;
DFG-Project – Test and Reliability of Nano-Electronic Systems

Becker, B., Polian, I., Hellebrand, S., Straube, B. and Wunderlich, H.-J.
it - Information Technology
Vol. 48(5), October 2006, pp. 304-311
2006
DOI PDF 
Keywords: Nanoelektronik; Entwurf; Test; Zuverlässigkeit; Fehlertoleranz/Nano-electronics; Design; Test; Dependability; Fault Tolerance
Abstract: Entwurf, Verifikation und Test zuverlässiger nanoelektronischer Systeme erfordern grundlegend neue Methoden und Ansätze. Ein robuster Entwurf wird unabdingbar, um Fertigungsfehler, Parameterschwankungen, zeitabhängige Materialveränderungen und vorübergehende Störungen in gewissem Umfang zu tolerieren. Gleichzeitig verlieren gerade dadurch viele traditionelle Testverfahren ihre Aussagekraft. Im Rahmen des Projekts RealTest werden einheitliche Entwurfs- und Teststrategien entwickelt, die sowohl einen robusten Entwurf als auch eine darauf abgestimmte Qualitätssicherung unterstützen.

The increasing number of fabrication defects, spatial and temporal variability of parameters, as well as the growing impact of soft errors in nanoelectronic systems require a paradigm shift in design, verification and test. A robust design is mandatory to ensure dependable systems and acceptable yields. The quest for design robustness, however, invalidates many traditional approaches for testing and implies enormous challenges. Within the framework of the RealTest project unified design and test strategies are developed to support a robust design and a coordinated quality assurance after the production and during the lifetime of a system.

BibTeX:
@article{BeckePHSW2006,
  author = {Becker, Bernd and Polian, Ilia and Hellebrand, Sybille and Straube, Bernd and Wunderlich, Hans-Joachim},
  title = {{DFG-Projekt RealTest - Test und Zuverlässigkeit nanoelektronischer Systeme;
DFG-Project – Test and Reliability of Nano-Electronic Systems}},
  journal = {it - Information Technology},
  publisher = {Oldenbourg Wissenschaftsverlag},
  year = {2006},
  volume = {48},
  number = {5},
  pages = {304--311},
  keywords = {Nanoelektronik; Entwurf; Test; Zuverlässigkeit; Fehlertoleranz/Nano-electronics; Design; Test; Dependability; Fault Tolerance},
  abstract = {Entwurf, Verifikation und Test zuverlässiger nanoelektronischer Systeme erfordern grundlegend neue Methoden und Ansätze. Ein robuster Entwurf wird unabdingbar, um Fertigungsfehler, Parameterschwankungen, zeitabhängige Materialveränderungen und vorübergehende Störungen in gewissem Umfang zu tolerieren. Gleichzeitig verlieren gerade dadurch viele traditionelle Testverfahren ihre Aussagekraft. Im Rahmen des Projekts RealTest werden einheitliche Entwurfs- und Teststrategien entwickelt, die sowohl einen robusten Entwurf als auch eine darauf abgestimmte Qualitätssicherung unterstützen.

The increasing number of fabrication defects, spatial and temporal variability of parameters, as well as the growing impact of soft errors in nanoelectronic systems require a paradigm shift in design, verification and test. A robust design is mandatory to ensure dependable systems and acceptable yields. The quest for design robustness, however, invalidates many traditional approaches for testing and implies enormous challenges. Within the framework of the RealTest project unified design and test strategies are developed to support a robust design and a coordinated quality assurance after the production and during the lifetime of a system.},
  doi = {http://dx.doi.org/10.1524/itit.2006.48.5.304},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2006/IT_BeckePHSW2006.pdf}
}

115. Minimizing Peak Power Consumption during Scan Testing: Test Pattern Modification with X Filling Heuristics
Badereddine, N., Girard, P., Pravossoudovitch, S., Landrault, C., Virazel, A. and Wunderlich, H.-J.
Proceedings of the Conference on Design & Test of Integrated Systems in Nanoscale Technology (DTIS'06), Tunis, Tunisia, 5-7 September 2006, pp. 359-364
2006
DOI PDF 
Keywords: Dft; scan testing; power-aware testing; peak power consumption
Abstract: Scan architectures, though widely used in modern designs, are expensive in power consumption. In this paper, we discuss the issues of excessive peak power consumption during scan testing. We show that taking care of high current levels during the test cycle (i.e. between launch and capture) is highly relevant to avoid noise phenomena such as IR-drop or ground bounce. We propose a solution based on power-aware assignment of don't care bits in deterministic test patterns. For ISCAS'89 and ITC'99 benchmark circuits, this approach reduces peak power during the test cycle up to 89% compared to a random filling solution.
BibTeX:
@inproceedings{BaderGPLVW2006,
  author = {Badereddine, Nabil and Girard, Patrick and Pravossoudovitch, Serge and Landrault, Christian and Virazel, Arnaud and Wunderlich, Hans-Joachim},
  title = {{Minimizing Peak Power Consumption during Scan Testing: Test Pattern Modification with X Filling Heuristics}},
  booktitle = {Proceedings of the Conference on Design & Test of Integrated Systems in Nanoscale Technology (DTIS'06)},
  publisher = {Institute of Electrical and Electronics Engineers},
  year = {2006},
  pages = {359--364},
  keywords = {Dft; scan testing; power-aware testing; peak power consumption},
  abstract = {Scan architectures, though widely used in modern designs, are expensive in power consumption. In this paper, we discuss the issues of excessive peak power consumption during scan testing. We show that taking care of high current levels during the test cycle (i.e. between launch and capture) is highly relevant to avoid noise phenomena such as IR-drop or ground bounce. We propose a solution based on power-aware assignment of don't care bits in deterministic test patterns. For ISCAS'89 and ITC'99 benchmark circuits, this approach reduces peak power during the test cycle up to 89% compared to a random filling solution.},
  doi = {http://dx.doi.org/10.1109/DTIS.2006.1708693},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2006/DTIS_BaderGPLVW2006.pdf}
}
114. Deterministic Logic BIST for Transition Fault Testing
Gherman, V., Wunderlich, H.-J., Schloeffel, J. and Garbers, M.
Proceedings of the 11th European Test Symposium (ETS'06), Southampton, United Kingdom, 21-24 May 2006, pp. 123-130
2006
DOI URL PDF 
Keywords: deterministic logic BIST; delay test
Abstract: BIST is an attractive approach to detect delay faults due to its inherent support for at-speed test. Deterministic logic BIST (DLBIST) is a technique which was successfully applied to stuck-at fault testing. As delay faults have lower random pattern testability than stuck-at faults, the need for DLBIST schemes is increased. Nevertheless, an extension to delay fault testing is not trivial, since this necessitates the application of pattern pairs. Consequently, delay fault testing is expected to require a larger mapping effort and logic overhead than stuck-at fault testing. In this paper, we consider the so-called transition fault model, which is widely used for complexity reasons. We present an extension of a DLBIST scheme for transition fault testing. Functional justification is used to generate the required pattern pairs. The efficiency of the extended scheme is investigated by using industrial benchmark circuits.
BibTeX:
@inproceedings{GhermWSG2006,
  author = {Gherman, Valentin and Wunderlich, Hans-Joachim and Schloeffel, Juergen and Garbers, Michael},
  title = {{Deterministic Logic BIST for Transition Fault Testing}},
  booktitle = {Proceedings of the 11th European Test Symposium (ETS'06)},
  publisher = {IEEE Computer Society},
  year = {2006},
  pages = {123--130},
  keywords = {deterministic logic BIST; delay test},
  abstract = {BIST is an attractive approach to detect delay faults due to its inherent support for at-speed test. Deterministic logic BIST (DLBIST) is a technique which was successfully applied to stuck-at fault testing. As delay faults have lower random pattern testability than stuck-at faults, the need for DLBIST schemes is increased. Nevertheless, an extension to delay fault testing is not trivial, since this necessitates the application of pattern pairs. Consequently, delay fault testing is expected to require a larger mapping effort and logic overhead than stuck-at fault testing. In this paper, we consider the so-called transition fault model, which is widely used for complexity reasons. We present an extension of a DLBIST scheme for transition fault testing. Functional justification is used to generate the required pattern pairs. The efficiency of the extended scheme is investigated by using industrial benchmark circuits.},
  url = {http://www.computer.org/csdl/proceedings/ets/2006/2566/00/25660123-abs.html},
  doi = {http://dx.doi.org/10.1109/ETS.2006.12},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2006/ETS_GhermWSG2006.pdf}
}
113. Software-Based Self-Test of Processors under Power Constraints
Zhou, J. and Wunderlich, H.-J.
Proceedings of the 9th Conference on Design, Automation and Test in Europe (DATE'06), Munich, Germany, 6-10 March 2006, pp. 430-436
2006
DOI URL PDF 
Keywords: test program generation; processor test; low power test
Abstract: Software-based self-test (SBST) of processors offers many benefits, such as dispensing with expensive test equipment, test execution during maintenance and in the field, or initialization tests for the whole system. In this paper, for the first time a structural SBST methodology is proposed which optimizes energy, average power consumption, test length and fault coverage at the same time.
BibTeX:
@inproceedings{ZhouW2006,
  author = {Zhou, Jun and Wunderlich, Hans-Joachim},
  title = {{Software-Based Self-Test of Processors under Power Constraints}},
  booktitle = {Proceedings of the 9th Conference on Design, Automation and Test in Europe (DATE'06)},
  publisher = {European Design and Automation Association, Leuven, Belgium},
  year = {2006},
  pages = {430--436},
  keywords = {test program generation; processor test; low power test},
  abstract = {Software-based self-test (SBST) of processors offers many benefits, such as dispense with expensive test equipments, test execution during maintenance and in the field or initialization tests for the whole system. In this paper, for the first time a structural SBST methodology is proposed which optimizes energy, average power consumption, test length and fault coverage at the same time.},
  url = {http://www.computer.org/csdl/proceedings/date/2006/8011/01/01656919-abs.html},
  doi = {http://dx.doi.org/10.1109/DATE.2006.243798},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2006/DATE_ZhouW2006.pdf}
}
112. X-Masking During Logic BIST and its Impact on Defect Coverage
Tang, Y., Wunderlich, H.-J., Engelke, P., Polian, I., Becker, B., Schlöffel, J., Hapke, F. and Wittke, M.
IEEE Transactions on Very Large Scale Integration (VLSI) Systems
Vol. 14(2), February 2006, pp. 193-202
2006
DOI URL PDF 
Keywords: Defect coverage, logic built-in self test (BIST), resistive bridging faults (RBFs), X-masking
Abstract: We present a technique for making a circuit ready for logic built-in self test by masking unknown values at its outputs. In order to keep the silicon area costs low, some known bits in output responses are also allowed to be masked. These bits are selected based on a stuck-at n-detection based metric, such that the impact of masking on the defect coverage is minimal. An analysis based on a probabilistic model for resistive short defects indicates that the coverage loss for unmodeled defects is negligible for relatively low values of n.
BibTeX:
@article{TangWEPBSHW2006,
  author = {Tang, Yuyi and Wunderlich, Hans-Joachim and Engelke, Piet and Polian, Ilian and Becker, Bernd and Schlöffel, Jürgen and Hapke, Friedrich and Wittke, Michael},
  title = {{X-Masking During Logic BIST and its Impact on Defect Coverage}},
  journal = {IEEE Transactions on Very Large Scale Integration (VLSI) Systems},
  publisher = {The Institute of Electrical and Electronics Engineers, Inc.},
  year = {2006},
  volume = {14},
  number = {2},
  pages = {193--202},
  keywords = {Defect coverage, logic built-in self test (BIST), resistive bridging faults (RBFs), X-masking},
  abstract = {We present a technique for making a circuit ready for logic built-in self test by masking unknown values at its outputs. In order to keep the silicon area costs low, some known bits in output responses are also allowed to be masked. These bits are selected based on a stuck-at n-detection based metric, such that the impact of masking on the defect coverage is minimal. An analysis based on a probabilistic model for resistive short defects indicates that the coverage loss for unmodeled defects is negligible for relatively low values of n.},
  url = {http://dl.acm.org/citation.cfm?id=1140580.1140589},
  doi = {http://dx.doi.org/10.1109/TVLSI.2005.863742},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2006/VLSI_TangWEPBSHW2006.pdf}
}
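Entries 112 above and 103 below select response bits for X-masking: all unknown (X) bits must be masked, and some known bits may additionally be sacrificed if the faults they observe are still detected often enough elsewhere (the n-detection criterion). The Python sketch below is only an illustration of that selection criterion as a single-pass approximation; it is not the papers' algorithm or cost function, and all names are ours.

# Hedged sketch of n-detection-based selection of maskable response bits.

def choose_masked_bits(responses, detections, n=5):
    """responses: dict (pattern, output) -> '0'/'1'/'X';
       detections: dict (pattern, output) -> set of faults observed at that bit."""
    masked = {pos for pos, val in responses.items() if val == 'X'}   # X bits must be masked
    counts = {}
    for pos, faults in detections.items():
        if pos in masked:
            continue
        for f in faults:
            counts[f] = counts.get(f, 0) + 1   # detections at unmasked positions
    # A known bit is cheap to mask if every fault it observes is still seen >= n times
    # elsewhere (single pass; counts are not updated after each masking decision).
    for pos, faults in detections.items():
        if pos not in masked and all(counts.get(f, 0) > n for f in faults):
            masked.add(pos)
    return masked

if __name__ == "__main__":
    resp = {(0, 0): 'X', (0, 1): '1', (1, 0): '0', (1, 1): '1'}
    det = {(0, 1): {'f1'}, (1, 0): {'f1', 'f2'}, (1, 1): {'f1'}}
    print(choose_masked_bits(resp, det, n=2))   # e.g. {(0, 0), (0, 1), (1, 1)}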
111. Some Common Aspects of Design Validation, Debug and Diagnosis
Arnaout, T., Bartsch, G. and Wunderlich, H.-J.
Proceedings of the 3rd IEEE International Workshop on Electronic Design, Test and Applications (DELTA'06), Kuala Lumpur, Malaysia, 17-19 January 2006, pp. 3-10
2006
DOI URL PDF 
Abstract: Design, Verification and Test of integrated circuits with millions of gates put strong requirements on design time, test volume, test application time, test speed and diagnostic resolution. In this paper, an overview is given on the common aspects of these tasks and how they interact. Diagnosis techniques may be used after manufacturing, for chip characterization and field return analysis, and even for rapid prototyping.
BibTeX:
@inproceedings{ArnaoBW2006,
  author = {Arnaout, Talal and Bartsch, Günter and Wunderlich, Hans-Joachim},
  title = {{Some Common Aspects of Design Validation, Debug and Diagnosis}},
  booktitle = {Proceedings of the 3rd IEEE International Workshop on Electronic Design, Test and Applications (DELTA'06)},
  publisher = {IEEE Computer Society},
  year = {2006},
  pages = {3--10},
  abstract = {Design, Verification and Test of integrated circuits with millions of gates put strong requirements on design time, test volume, test application time, test speed and diagnostic resolution. In this paper, an overview is given on the common aspects of these tasks and how they interact. Diagnosis techniques may be used after manufacturing, for chip characterization and field return analysis, and even for rapid prototyping.},
  url = {http://www.computer.org/csdl/proceedings/delta/2006/2500/00/25000003-abs.html},
  doi = {http://dx.doi.org/10.1109/DELTA.2006.79},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2006/DELTA_ArnaoBW2006.pdf}
}
110. On the Reliability Evaluation of SRAM-based FPGA Designs
Héron, O., Arnaout, T. and Wunderlich, H.-J.
Proceedings of the 15th IEEE International Conference on Field Programmable Logic and Applications (FPL'05), Tampere, Finland, 24-26 August 2005, pp. 403-408
2005
DOI URL PDF 
Abstract: Benefits of Field Programmable Gate Arrays (FPGAs) have led to a spectrum of use ranging from consumer products to astronautics. This diversity makes it necessary to evaluate the reliability of FPGAs because of their high susceptibility to soft errors, which are due to the high density of embedded SRAM cells. Reliability evaluation is an important step in designing highly reliable systems, which results in a strong competitive advantage in today's marketplace. This paper proposes a mathematical model able to evaluate and therefore help to improve the reliability of SRAM-based FPGAs.
BibTeX:
@inproceedings{HeAW2005,
  author = {Héron, Oliver and Arnaout,Talal and Wunderlich, Hans-Joachim},
  title = {{On the Reliability Evaluation of SRAM-based FPGA Designs}},
  booktitle = {Proceedings of the 15th IEEE International Conference on Field Programmable Logic and Applications (FPL'05)},
  publisher = {IEEE Computer Society},
  year = {2005},
  pages = {403--408},
  abstract = {Benefits of Field Programmable Gate Arrays (FPGAs) have lead to a spectrum of use ranging from consumer products to astronautics. This diversity necessitates the need to evaluate the reliability of the FPGA, because of their high susceptibility to soft errors, which are due to the high density of embedded SRAM cells. Reliability evaluation is an important step in designing highly reliable systems, which results in a strong competitive advantage in today's marketplace. This paper proposes a mathematical model able to evaluate and therefore help to improve the reliability of SRAM-based FPGAs.},
  url = {http://www.computer.org/csdl/proceedings/fpl/2005/9362/00/01515755-abs.html},
  doi = {http://dx.doi.org/10.1109/FPL.2005.1515755},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2005/FPL_HeAW2005.pdf}
}
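As a purely illustrative companion to entry 110 above: if configuration-bit upsets are assumed to occur independently at a constant rate and only a fraction of the bits is critical for the mapped design, design reliability over time can be approximated by an exponential law. This generic SEU model and all parameter names below are our assumptions for illustration; they are not claimed to be the model proposed in the paper.

# Hedged sketch of a generic exponential reliability estimate for an SRAM-based FPGA design.

import math

def design_reliability(t_hours, upsets_per_bit_hour, total_bits, critical_fraction):
    """Probability that no critical configuration bit is upset within t_hours."""
    critical_bits = total_bits * critical_fraction
    effective_rate = upsets_per_bit_hour * critical_bits   # design failures per hour
    return math.exp(-effective_rate * t_hours)

if __name__ == "__main__":
    # e.g. 10 Mbit of configuration SRAM, 10% of it critical, 1e-11 upsets/bit/hour, one year
    print(design_reliability(t_hours=8760, upsets_per_bit_hour=1e-11,
                             total_bits=10e6, critical_fraction=0.1))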
109. Development of an Audio Player as System-on-a-Chip using an Open Source Platform
Pattara, K., Azuara, L., Dorsch, R. and Wunderlich, H.-J.
Proceedings of the IEEE International Symposium on Circuits and Systems (ISCAS'05)
Vol. 3, Kobe, Japan, 23-26 May 2005, pp. 2935-2938
2005
DOI PDF 
Abstract: Open source software is becoming more widely used, notably in server and desktop applications. For embedded systems development, usage of open source software can also reduce development and licensing costs. We report on our experience in developing a System-on-a-Chip (SoC) audio player using various open source components in both the hardware and software parts as well as in the development process. The Ogg Vorbis audio decoder, targeted at devices with limited computing resources and low power consumption, was developed on the free LEON SoC platform, which features a SPARC-V8 architecture compatible processor and an AMBA bus. The decoder runs on the open source RTEMS operating system, making use of the royalty-free open source Vorbis library. We also aim to illustrate the use of hardware/software co-design techniques. Therefore, in order to speed up the decoding process, after an analysis, a computing-intensive part of the decoding algorithm was selected and designed as an AMBA compatible hardware core. The demonstration prototype was built on the XESS XSV-800 prototyping board using GNU/Linux workstations for development. This project shows that development of an SoC using an open source platform is viable and might be the preferred choice in the future.
BibTeX:
@inproceedings{PattaADW2005,
  author = {Pattara, Kiatisevi and Azuara, Luis and Dorsch, Rainer and Wunderlich, Hans-Joachim},
  title = {{Development of an Audio Player as System-on-a-Chip using an Open Source Platform}},
  booktitle = {Proceedings of the IEEE International Symposium on Circuits and Systems (ISCAS'05)},
  publisher = {IEEE Computer Society},
  year = {2005},
  volume = {3},
  pages = {2935--2938},
  abstract = {Open source software is becoming more widely used, notably in server and desktop applications. For embedded systems development, usage of open source software can also reduce development and licensing costs. We report on our experience in developing a System-on-a-Chip (SoC) audio player using various open source components in both the hardware and software parts as well as in the development process. The Ogg Vorbis audio decoder, targeted at devices with limited computing resources and low power consumption, was developed on the free LEON SoC platform, which features a SPARC-V8 architecture compatible processor and an AMBA bus. The decoder runs on the open source RTEMS operating system, making use of the royalty-free open source Vorbis library. We also aim to illustrate the use of hardware/software co-design techniques. Therefore, in order to speed up the decoding process, after an analysis, a computing-intensive part of the decoding algorithm was selected and designed as an AMBA compatible hardware core. The demonstration prototype was built on the XESS XSV-800 prototyping board using GNU/Linux workstations for development. This project shows that development of an SoC using an open source platform is viable and might be the preferred choice in the future.},
  doi = {http://dx.doi.org/10.1109/ISCAS.2005.1465242},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2005/ISCAS_PattaADW2005.pdf}
}
108. From Embedded Test to Embedded Diagnosis
Wunderlich, H.-J.
Proceedings of the 10th IEEE European Test Symposium (ETS'05), Tallinn, Estonia, 22-25 May 2005, pp. 216-221
2005
DOI URL PDF 
Abstract: Testing integrated circuits with millions of transistors puts strong requirements on test volume, test application time, test speed, and test resolution. To overcome these challenges, it is widely accepted to partition test resources between the automatic test equipment (ATE) and the circuit under test (CUT). These strategies may reach from simple test data compression/decompression schemes to implementing a complete built-in self-test. Very often these schemes come with reduced diagnostic resolution.
In this paper, an overview is given on techniques for embedding test into a circuit while still keeping diagnostic capabilities. Built-in diagnosis techniques may be used after manufacturing, for chip characterization and field return analysis, and even for rapid prototyping.
BibTeX:
@inproceedings{Wunde2005,
  author = {Wunderlich, Hans-Joachim},
  title = {{From Embedded Test to Embedded Diagnosis}},
  booktitle = {Proceedings of the 10th IEEE European Test Symposium (ETS'05)},
  publisher = {IEEE Computer Society},
  year = {2005},
  pages = {216--221},
  abstract = {Testing integrated circuits with millions of transistors puts strong requirements on test volume, test application time, test speed, and test resolution. To overcome these challenges, it is widely accepted to partition test resources between the automatic test equipment (ATE) and the circuit under test (CUT). These strategies may reach from simple test data compression/decompression schemes to implementing a complete built-in self-test. Very often these schemes come with reduced diagnostic resolution. 
In this paper, an overview is given on techniques for embedding test into a circuit while still keeping diagnostic capabilities. Built-in diagnosis techniques may be used after manufacturing, for chip characterization and field return analysis, and even for rapid prototyping.},
  url = {http://www.computer.org/csdl/proceedings/ets/2005/2341/00/23410216-abs.html},
  doi = {http://dx.doi.org/10.1109/ETS.2005.26},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2005/ETS_Wunde2005.pdf}
}
107. Implementing a Scheme for External Deterministic Self-Test
Hakmi, A.W., Wunderlich, H.-J., Gherman, V., Garbers, M. and Schlöffel, J.
Proceedings of the 23rd IEEE VLSI Test Symposium (VTS'05), Palm Springs, California, USA, 1-5 May 2005, pp. 101-106
2005
DOI URL PDF 
Keywords: deterministic self-test; external BIST; test resource partitioning; test data compression
Abstract: A method for test resource partitioning is introduced which keeps the design-for-test logic test set independent and moves the test pattern dependent information to an external, programmable chip. The scheme includes a new decompression scheme for a fast and efficient communication between the external test chip and the circuit under test. The hardware costs on chip are significantly lower compared with a deterministic BIST scheme while the test application time is still in the same range. The proposed scheme is fully programmable, flexible and can be reused at board level for testing in the field. Keywords: Deterministic self-test, external BIST, test resource partitioning, test data compression.
BibTeX:
@inproceedings{HakmiWGGS2005,
  author = {Hakmi, Abdul Wahid and Wunderlich, Hans-Joachim and Gherman, Valentin and Garbers, Michael and Schlöffel, Jürgen},
  title = {{Implementing a Scheme for External Deterministic Self-Test}},
  booktitle = {Proceedings of the 23rd IEEE VLSI Test Symposium (VTS'05)},
  publisher = {IEEE Computer Society},
  year = {2005},
  pages = {101--106},
  keywords = {deterministic self-test; external BIST; test resource partitioning; test data compression},
  abstract = {A method for test resource partitioning is introduced which keeps the design-for-test logic test set independent and moves the test pattern dependent information to an external, programmable chip. The scheme includes a new decompression scheme for a fast and efficient communication between the external test chip and the circuit under test. The hardware costs on chip are significantly lower compared with a deterministic BIST scheme while the test application time is still in the same range. The proposed scheme is fully programmable, flexible and can be reused at board level for testing in the field. Keywords: Deterministic self-test, external BIST, test resource partitioning, test data compression.},
  url = {http://www.computer.org/csdl/proceedings/vts/2005/2314/00/23140101-abs.html},
  doi = {http://dx.doi.org/10.1109/VTS.2005.50},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2005/VTS_HakmiWGGS2005.pdf}
}
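Entry 107 above moves pattern-dependent information to an external programmable chip and relies on an on-chip decompressor. The concrete scheme of the paper is different and more efficient, but the general idea of test-data compression can be illustrated with a trivial run-length code over care-bit-sparse scan data, as in the hedged Python sketch below; all names are illustrative.

# Hedged sketch of generic test-data compression/decompression (run-length coding).

def rle_compress(bits):
    """Encode a bit string as a list of (bit, run_length) pairs."""
    out, i = [], 0
    while i < len(bits):
        j = i
        while j < len(bits) and bits[j] == bits[i]:
            j += 1
        out.append((bits[i], j - i))
        i = j
    return out

def rle_decompress(runs):
    """Expand (bit, run_length) pairs back into scan data."""
    return ''.join(bit * length for bit, length in runs)

if __name__ == "__main__":
    cube = "0000000011110000"
    runs = rle_compress(cube)
    assert rle_decompress(runs) == cube
    print(runs)   # [('0', 8), ('1', 4), ('0', 4)]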
106. Sequence Length, Area Cost and Non-Target Defect Coverage Tradeoffs in Deterministic Logic BIST
Engelke, P., Gherman, V., Polian, I., Tang, Y., Wunderlich, H.-J. and Becker, B.
Proceedings of the 8th IEEE Workshop on Design and Diagnostics of Electronic Circuits and Systems (DDECS'05), Sopron, Hungary, 13-16 April 2005, pp. 11-18
2005
PDF 
Keywords: Test Tradeoffs; Logic BIST; Defect Coverage; Resistive Bridging Faults
Abstract: For the first time, we study the coverage of non-target defects for a Deterministic Logic BIST (DLBIST) architecture. We consider several DLBIST implementation options that result in test sequences of different lengths. Resistive bridging faults are used as a surrogate of non-target defects. Experimental data obtained for the largest ISCAS benchmarks suggest that, although DLBIST always guarantees complete stuck-at coverage, test sequence length does influence the non-target defect detection capabilities. For circuits with a large fraction of random-pattern resistant faults, the embedded deterministic patterns as well as a sufficient amount of random patterns are both demonstrated to be essential for non-target defect detection. It turns out, moreover, that area cost is lower for DLBIST solutions with longer test sequences, due to additional degrees of freedom for the embedding procedure and a lower number of faults undetected by pseudorandom patterns. This implies that DLBIST is particularly effective in covering non-target defects.
BibTeX:
@inproceedings{EngelGPTWB2005,
  author = {Engelke, Piet and Gherman, Valentin and Polian, Ilia and Tang, Yuyi and Wunderlich, Hans-Joachim and Becker, Bernd},
  title = {{Sequence Length, Area Cost and Non-Target Defect Coverage Tradeoffs in Deterministic Logic BIST}},
  booktitle = {Proceedings of the 8th IEEE Workshop on Design and Diagnostics of Electronic Circuits and Systems (DDECS'05)},
  publisher = {IEEE Computer Society},
  year = {2005},
  pages = {11--18},
  keywords = {Test Tradeoffs; Logic BIST; Defect Coverage; Resistive Bridging Faults},
  abstract = {For the first time, we study the coverage of non-target defects for Deterministic Logic BIST (DLBIST) architecture. We consider several DLBIST implementation options that result in test sequences of different lengths. Resistive bridging faults are used as a surrogate of non-target defects. Experimental data obtained for largest ISCAS benchmarks suggests that, although DLBIST always guarantees complete stuck-at coverage, test sequence length does influence the non-target defect detection capabilities. For circuits with a large fraction of random-pattern resistant faults, the embedded deterministic patterns as well as a sufficient amount of random patterns are both demonstrated to be essential for non-target defect detection. It turns out, moreover, that area cost is lower for DLBIST solutions with longer test sequences, due to additional degrees of freedom for the embedding procedure and a lower number of faults undetected by pseudorandom patterns. This implies that DLBIST is particularly effective in covering non-target defects.},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2005/DDECS_EngelGPTWB2005.pdf}
}
105. Frühe Zuverlässigkeitsanalyse mechatronischer Systeme;
Early Reliability Analysis for Mechatronic Systems

Jäger, P., Bertsche, B., Arnout, T. and Wunderlich, H.-J.
22. VDI Tagung Technische Zuverlässigkeit (TTZ'05)
Vol. 1884, Stuttgart, Germany, 7-8 April 2005, pp. 39-56
2005
URL PDF 
Abstract: Mechatronic systems are ubiquitous today. By combining mechanics with modern information processing (electronics and software), the performance of products can be increased considerably. CVT transmissions are one example. The first transmissions of this type were largely mechanical/hydraulic structures [1]. More recent CVT transmissions, such as the ZF Ecotronic [2] or the front CVT of the Mercedes-Benz A-Class [3], have an electronic control that can increase the transmission's performance but can also introduce unreliability. This contribution takes up the topic of the reliability of mechatronic systems and discusses it especially against the background of reliability work in the early development phases, since the concept phase in particular, through the choice of the right concept, is chiefly responsible for the final success of a product. Special attention is paid to the acquisition of information in the early phases, because the success of reliability work depends decisively on the available data and information.
BibTeX:
@inproceedings{JaegerBAW2005,
  author = {Jäger, Patrick and Bertsche, Bernd and Arnout, Talal and Wunderlich, Hans-Joachim},
  title = {{Frühe Zuverlässigkeitsanalyse mechatronischer Systeme;
Early Reliability Analysis for Mechatronic Systems}},
  booktitle = {22. VDI Tagung Technische Zuverlässigkeit (TTZ'05)},
  publisher = {VDE VERLAG GMBH},
  year = {2005},
  volume = {1884},
  pages = {39--56},
  abstract = {Mechatronische Systeme sind heutzutage allgegenwärtig. Durch die Kombination aus Mechanik und moderner Informationsverarbeitung (Elektronik und Software) kann die Leistungsfähigkeit von Produkten deutlich gesteigert werden. Ein Beispiel hierfür sind CVT-Getriebe. Die ersten Getriebe dieser Bauart waren weitgehend mechanisch/hydraulische Strukturen [1]. Modernere CVT-Getriebe, wie das ZF Ecotronic [2] oder das Front-CVT der Mercedes-Benz A-Klasse [3], verfügen über eine elektronische Steuerung, die die Leistungsfähigkeit des Getriebes zu steigern vermag, aber auch zu Unzuverlässigkeiten führen kann. In diesem Beitrag soll das Thema der Zuverlässigkeit mechatronischer Systeme aufgegriffen werden und insbesondere vor dem Hintergrund der Zuverlässigkeitsarbeit in frühen Entwicklungsphasen diskutiert werden, da namentlich die Konzeptphase durch die Auswahl des richtigen Konzeptes für den endgültigen Produkterfolg hauptverantwortlich ist. Hierzu wird speziell das Thema der Informationsgewinnung in frühen Phasen thematisiert, da der Erfolg der Zuverlässigkeitsarbeit maßgeblich von der Daten- und Informationslage abhängig ist.},
  url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2005-118&engl=0},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2005/TTZ_JaegeBAW2005.pdf}
}
104. Efficient Pattern Mapping For Deterministic Logic BIST
Gherman, V., Wunderlich, H.-J., Vranken, H., Hapke, F., Wittke, M. and Garbers, M.
Proceedings of the 35th IEEE International Test Conference (ITC'04), Charlotte, North Carolina, USA, 26-28 October 2004, pp. 48-56
2004
DOI URL PDF 
Keywords: Logic BIST; BDDs
Abstract: Deterministic logic BIST (DLBIST) is an attractive test strategy, since it combines advantages of deterministic external testing and pseudo-random LBIST. Unfortunately, previously published DLBIST methods are unsuited for large ICs, since computing time and memory consumption of the DLBIST synthesis algorithms increase exponentially, or at least cubically, with the circuit size.
In this paper, we propose a novel DLBIST synthesis procedure that has nearly linear complexity in terms of both computing time and memory consumption. The new algorithms are based on binary decision diagrams (BDDs). We demonstrate the efficiency of the new algorithms for industrial designs up to 2M gates.
BibTeX:
@inproceedings{GhermWVHWG2004,
  author = {Gherman, Valentin and Wunderlich, Hans-Joachim and Vranken, Harald and Hapke, Friedrich and Wittke, Michael and Garbers, Michael},
  title = {{Efficient Pattern Mapping For Deterministic Logic BIST}},
  booktitle = {Proceedings of the 35th IEEE International Test Conference (ITC'04)},
  publisher = {IEEE Computer Society},
  year = {2004},
  pages = {48--56},
  keywords = {Logic BIST; BDDs},
  abstract = {Deterministic logic BIST (DLBIST) is an attractive test strategy, since it combines advantages of deterministic external testing and pseudo-random LBIST. Unfortunately, previously published DLBIST methods are unsuited for large ICs, since computing time and memory consumption of the DLBIST synthesis algorithms increase exponentially, or at least cubically, with the circuit size. 
In this paper, we propose a novel DLBIST synthesis procedure that has nearly linear complexity in terms of both computing time and memory consumption. The new algorithms are based on binary decision diagrams (BDDs). We demonstrate the efficiency of the new algorithms for industrial designs up to 2M gates.},
  url = {http://www.computer.org/csdl/proceedings/itc/2004/2741/00/27410048-abs.html},
  doi = {http://dx.doi.org/10.1109/TEST.2004.1386936},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2004/ITC_GhermWVHWG2004.pdf}
}
103. X-Masking During Logic BIST and Its Impact on Defect Coverage
Tang, Y., Wunderlich, H.-J., Vranken, H., Hapke, F., Wittke, M., Engelke, P., Polian, I. and Becker, B.
Proceedings of the 35th IEEE International Test Conference (ITC'04), Charlotte, North Carolina, USA, 26-28 October 2004, pp. 442-451
2004
DOI URL PDF 
Keywords: X-Masking; Logic BIST; Defect Coverage; Resistive Bridging Faults
Abstract: We present a technique for making a circuit ready for Logic BIST by masking unknown values at its outputs. In order to keep the area overhead low, some known bits in output responses are also allowed to be masked. These bits are selected based on a stuck-at n-detection based metric, such that the impact of masking on the defect coverage is minimal. An analysis based on a probabilistic model for resistive short defects indicates that the coverage loss for unmodeled defects is negligible for relatively low values of n.
BibTeX:
@inproceedings{TangWVHWEPB2004,
  author = {Tang, Yuyi and Wunderlich, Hans-Joachim and Vranken, Harald and Hapke, Friedrich and Wittke, Michael and Engelke, Piet and Polian, Ilian and Becker, Bernd},
  title = {{X-Masking During Logic BIST and Its Impact on Defect Coverage}},
  booktitle = {Proceedings of the 35th IEEE International Test Conference (ITC'04)},
  publisher = {IEEE Computer Society},
  year = {2004},
  pages = {442--451},
  keywords = {X-Masking; Logic BIST; Defect Coverage; Resistive Bridging Faults},
  abstract = {We present a technique for making a circuit ready for Logic BIST by masking unknown values at its outputs. In order to keep the area overhead low, some known bits in output responses are also allowed to be masked. These bits are selected based on a stuck-at n-detection based metric, such that the impact of masking on the defect coverage is minimal. An analysis based on a probabilistic model for resistive short defects indicates that the coverage loss for unmodeled defects is negligible for relatively low values of n.},
  url = {http://www.computer.org/csdl/proceedings/itc/2004/2741/00/27410442-abs.html},
  doi = {http://dx.doi.org/10.1109/TEST.2004.1386980},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2004/ITC_TangWVHWEPB2004.pdf}
}
102. Efficient Pattern Mapping For Deterministic Logic BIST
Gherman, V., Wunderlich, H.-J., Vranken, H., Hapke, F. and Wittke, M.
Proceedings of the 9th IEEE European Test Symposium (ETS'04), Ajaccio, Corsica, France, 23-26 May 2004, pp. 327-332
2004
PDF 
Keywords: Logic BIST; BDDs
Abstract: Deterministic logic BIST (DLBIST) is an attractive test strategy, since it combines advantages of deterministic external testing and pseudo-random LBIST. Unfortunately, previously published DLBIST methods are unsuited for large ICs, since the computing time and memory consumption of the DLBIST synthesis algorithms increase exponentially, or at least cubically, with the circuit size. In this paper, we propose a novel DLBIST synthesis procedure that has nearly linear complexity in terms of both computing time and memory consumption. The new algorithms are based on binary decision diagrams (BDDs). We demonstrate the efficiency of the new algorithms for industrial designs up to 4M gates.
BibTeX:
@inproceedings{GhermWVHW2004,
  author = {Gherman, Valentin and Wunderlich, Hans-Joachim and Vranken, Harald and Hapke, Friedrich and Wittke, Michael},
  title = {{Efficient Pattern Mapping For Deterministic Logic BIST}},
  booktitle = {Proceedings of the 9th IEEE European Test Symposium (ETS'04)},
  publisher = {IEEE Computer Society},
  year = {2004},
  pages = {327--332},
  keywords = {Logic BIST; BDDs},
  abstract = {Deterministic logic BIST (DLBIST) is an attractive test strategy, since it combines advantages of deterministic external testing and pseudo-random LBIST. Unfortunately, previously published DLBIST methods are unsuited for large ICs, since the computing time and memory consumption of the DLBIST synthesis algorithms increase exponentially, or at least cubically, with the circuit size. In this paper, we propose a novel DLBIST synthesis procedure that has nearly linear complexity in terms of both computing time and memory consumption. The new algorithms are based on binary decision diagrams (BDDs). We demonstrate the efficiency of the new algorithms for industrial designs up to 4M gates.},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2004/ETS_GhermWVHW2004.pdf}
}
101. Impact of Test Point Insertion on Silicon Area and Timing during Layout
Vranken, H., Sapei, F.S. and Wunderlich, H.-J.
Proceedings of the 7th Conference on Design, Automation and Test in Europe (DATE'04)
Vol. 2, Paris, France, 29 March-2 April 2004, pp. 20810-20815
2004
DOI URL PDF 
Abstract: This paper presents an experimental investigation on the impact of test point insertion on circuit size and performance. Often test points are inserted into a circuit in order to improve the circuit's testability, which results in smaller test data volume, shorter test time, and higher fault coverage. Inserting test points however requires additional silicon area and influences the timing of a circuit. The paper shows how placement and routing is affected by test point insertion during layout generation. Experimental data for industrial circuits show that inserting 1% test points in general increases the silicon area after layout by less than 0.5% while the performance of the circuit may be reduced by 5% or more.
BibTeX:
@inproceedings{VrankSW2004,
  author = {Vranken, Harald and Sapei, Ferry Syafei and Wunderlich, Hans-Joachim},
  title = {{Impact of Test Point Insertion on Silicon Area and Timing during Layout}},
  booktitle = {Proceedings of the 7th Conference on Design, Automation and Test in Europe (DATE'04)},
  publisher = {IEEE Computer Society},
  year = {2004},
  volume = {2},
  pages = {20810--20815},
  abstract = {This paper presents an experimental investigation on the impact of test point insertion on circuit size and performance. Often test points are inserted into a circuit in order to improve the circuit's testability, which results in smaller test data volume, shorter test time, and higher fault coverage. Inserting test points however requires additional silicon area and influences the timing of a circuit. The paper shows how placement and routing is affected by test point insertion during layout generation. Experimental data for industrial circuits show that inserting 1% test points in general increases the silicon area after layout by less than 0.5% while the performance of the circuit may be reduced by 5% or more.},
  url = {http://www.computer.org/csdl/proceedings/date/2004/2085/02/208520810-abs.html},
  doi = {http://dx.doi.org/10.1109/DATE.2004.1268981},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2004/DATE_VrankSW2004.pdf}
}
100. Reliability Considerations for Mechatronic Systems on the Basis of a State Model
Göhner, P., Zimmer, E., Arnaout, T. and Wunderlich, H.-J.
Proceedings of the 17th International Conference on Architecture of Computing Systems (ARCS'04) - Organic and Pervasive Computing
Vol. 41, Augsburg, Germany, 23-26 March 2004, pp. 106-112
2004
URL PDF 
Abstract: The first step in analyzing a problem is to establish a valid model that would represent this problem. The model helps mainly in understanding the problem by depicting it in a visual form. Hence, in order to analyze the reliability of mechatronic systems, we need to understand first how such systems fail and how they behave in the presence of a failure. This understanding would help us later in the analysis and the development of formal solutions to achieve the demanded reliability. This could be achieved using the model that we have developed, which will be presented in this paper.
BibTeX:
@inproceedings{GoehnZAW2004,
  author = {Göhner, Peter and Zimmer, Eduard and Arnaout, Talal and Wunderlich, Hans-Joachim},
  title = {{Reliability Considerations for Mechatronic Systems on the Basis of a State Model}},
  booktitle = {Proceedings of the 17th International Conference on Architecture of Computing Systems (ARCS'04) - Organic and Pervasive Computing},
  publisher = {Gesellschaft für Informatik},
  year = {2004},
  volume = {41},
  pages = {106--112},
  abstract = {The first step in analyzing a problem is to establish a valid model that would represent this problem. The model helps mainly in understanding the problem by depicting it in a visual form. Hence, in order to analyze the reliability of mechatronic systems, we need to understand first how such systems fail and how they behave in the presence of a failure. This understanding would help us later in the analysis and the development of formal solutions to achieve the demanded reliability. This could be achieved using the model that we have developed, which will be presented in this paper.},
  url = {http://subs.emis.de/LNI/Proceedings/Proceedings41/article1149.html},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2004/ARCS_GoehnZAW2004.pdf}
}
99. Test Engineering Education in Europe: the EuNICE-Test Project
Bertrand, Y., Flottes, M.-L., Balado, L., Figueras, J., Biasizzo, A., Novak, F., Di Carlo, S., Prinetto, P., Pricopi, N., Wunderlich, H.-J. and Van der Heyden, J.-P.
Proceedings of the IEEE International Conference on Microelectronic Systems Education (MSE'03), Anaheim, California, USA, 1-2 July 2003, pp. 85-86
2003
DOI URL PDF 
Abstract: The paper deals with a European experience of education in industrial test of ICs and SoCs using remote testing facilities. The project addresses the problem of the shortage of microelectronics engineers aware of the new challenge of testing mixed-signal SoCs for the multimedia/telecom market. It aims at providing test training facilities at a European scale in both initial and continuing education contexts. This is done by allowing the academic and industrial partners of the consortium to train engineers using the common test resources center (CRTC) hosted by LIRMM (Laboratoire d'Informatique, de Robotique et de Micro-électronique de Montpellier, France). CRTC test tools include up-to-date/high-tech testers that are fully representative of real industrial testers as used on production testfloors. By the end of the project, the aim is to reach a cruising speed of about 16 trainees per year per center. Each trainee will have attended at least one one-week training using the remote test facilities of CRTC.
BibTeX:
@inproceedings{BertrFBFBNDPPW2003,
  author = {Bertrand, Yves and Flottes, Marie-Lise and Balado, Luz and Figueras, Joan and Biasizzo, Anton and Novak, Frank and Di Carlo, Stefano and Prinetto, Paolo and Pricopi, Nicoleta and Wunderlich, Hans-Joachim and Van der Heyden, Jean-Pierre},
  title = {{Test Engineering Education in Europe: the EuNICE-Test Project}},
  booktitle = {Proceedings of the IEEE International Conference on Microelectronic Systems Education (MSE'03)},
  publisher = {IEEE Computer Society},
  year = {2003},
  pages = {85--86},
  abstract = {The paper deals with a European experience of education in industrial test of ICs and SoCs using remote testing facilities. The project addresses the problem of the shortage of microelectronics engineers aware of the new challenge of testing mixed-signal SoCs for the multimedia/telecom market. It aims at providing test training facilities at a European scale in both initial and continuing education contexts. This is done by allowing the academic and industrial partners of the consortium to train engineers using the common test resources center (CRTC) hosted by LIRMM (Laboratoire d'Informatique, de Robotique et de Micro-électronique de Montpellier, France). CRTC test tools include up-to-date/high-tech testers that are fully representative of real industrial testers as used on production testfloors. By the end of the project, the aim is to reach a cruising speed of about 16 trainees per year per center. Each trainee will have attended at least one one-week training using the remote test facilities of CRTC.},
  url = {http://www.computer.org/csdl/proceedings/mse/2003/1973/00/19730085-abs.html},
  doi = {http://dx.doi.org/10.1109/MSE.2003.1205266},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2003/MSE_BertrFBFBNCPPW2003.pdf}
}
98. Adapting an SoC to ATE Concurrent Test Capabilities
Dorsch, R., Rivera, R.H., Wunderlich, H.-J. and Fischer, M.
Proceedings of the 33rd International Test Conference (ITC'02), Baltimore, Maryland, USA, 7-10 October 2002, pp. 1169-1175
2002
DOI URL PDF 
Keywords: ATE; concurrent test; SoC test; test resource partitioning
Abstract: Concurrent test features are available in the next generation SoC testers to increase ATE throughput. To exploit these new features design modifications are necessary. In a case study, these modifications were applied to the open source Leon SoC platform containing an embedded 32 bit CPU, an AMBA bus, and several embedded cores. The concurrent test of Leon was performed on an SoC tester. The gain in test application time and area costs are quantified and obstacles in the design flow for concurrent test are discussed.
BibTeX:
@inproceedings{DorscRWF2002,
  author = {Dorsch, Rainer and Rivera, Ramón Huerta and Wunderlich, Hans-Joachim and Fischer, Martin},
  title = {{Adapting an SoC to ATE Concurrent Test Capabilities}},
  booktitle = {Proceedings of the 33rd International Test Conference (ITC'02)},
  publisher = {IEEE Computer Society},
  year = {2002},
  pages = {1169--1175},
  keywords = {ATE; concurrent test; SoC test; test resource partitioning},
  abstract = {Concurrent test features are available in the next generation SoC testers to increase ATE throughput. To exploit these new features design modifications are necessary. In a case study, these modifications were applied to the open source Leon SoC platform containing an embedded 32 bit CPU, an AMBA bus, and several embedded cores. The concurrent test of Leon was performed on an SoC tester. The gain in test application time and area costs are quantified and obstacles in the design flow for concurrent test are discussed.},
  url = {http://www.computer.org/csdl/proceedings/itc/2002/7543/00/75431169-abs.html},
  doi = {http://dx.doi.org/10.1109/TEST.2002.1041875},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2002/ITC_DorscRWF2002.pdf}
}
97. High Defect Coverage with Low Power Test Sequences in a BIST Environment
Girard, P., Landrault, C., Pravossoudovitch, S., Virazel, A. and Wunderlich, H.-J.
IEEE Design & Test of Computers
Vol. 19(5), September-October 2002, pp. 44-52
2002
DOI URL PDF 
Abstract: A new technique, random single-input change (RSIC) test generation, generates low-power test patterns that provide a high level of defect coverage during low-power BIST of digital circuits. The authors propose a parallel BIST implementation of the RSIC generator and analyze its area-overhead impact.
BibTeX:
@article{GirarLPVW2002,
  author = {Girard, Patrick and Landrault, Christian and Pravossoudovitch, Serge and Virazel, Arnaud and Wunderlich, Hans-Joachim},
  title = {{High Defect Coverage with Low Power Test Sequences in a BIST Environment}},
  journal = {IEEE Design & Test of Computers},
  publisher = {IEEE Computer Society},
  year = {2002},
  volume = {19},
  number = {5},
  pages = {44--52},
  abstract = {A new technique, random single-input change (RSIC) test generation, generates low-power test patterns that provide a high level of defect coverage during low-power BIST of digital circuits. The authors propose a parallel BIST implementation of the RSIC generator and analyze its area-overhead impact.},
  url = {http://dl.acm.org/citation.cfm?id=622211.623191},
  doi = {http://dx.doi.org/10.1109/MDT.2002.1033791},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2002/D&T_GirarLPVW2002.pdf}
}
96. Efficient Online and Offline Testing of Embedded DRAMs
Hellebrand, S., Wunderlich, H.-J., Ivaniuk, A.A., Klimets, Y.V. and Yarmolik, V.N.
IEEE Transactions on Computers
Vol. 51(7), July 2002, pp. 801-809
2002
DOI URL PDF 
Keywords: Embedded memories; systems-on-a-chip; online checking; BIST
Abstract: This paper presents an integrated approach for both built-in online and offline testing of embedded DRAMs. It is based on a new technique for output data compression which offers the same benefits as signature analysis during offline test, but also supports efficient online consistency checking. The initial fault-free memory contents are compressed to a reference characteristic and compared to test characteristics periodically. The reference characteristic depends on the memory contents, but unlike similar characteristics based on signature analysis, it can be easily updated concurrently with WRITE operations. This way, changes in memory do not require a time consuming recomputation. The respective test characteristics can be efficiently computed during the periodic refresh operations of the dynamic RAM. Experiments show that the proposed technique significantly reduces the time between the occurrence of an error and its detection (error detection latency). Compared to error detecting codes (EDC) it also achieves a significantly higher error coverage at lower hardware costs. Therefore, it perfectly complements standard online checking approaches relying on EDC, where the concurrent detection of certain types of errors is guaranteed, but only during READ operations accessing the erroneous data.
BibTeX:
@article{HelleWIKY2002,
  author = {Hellebrand, Sybille and Wunderlich, Hans-Joachim and Ivaniuk, Alexander A. and Klimets, Yuri V. and Yarmolik, Vyacheslav N.},
  title = {{Efficient Online and Offline Testing of Embedded DRAMs}},
  journal = {IEEE Transactions on Computers},
  publisher = {IEEE Computer Society},
  year = {2002},
  volume = {51},
  number = {7},
  pages = {801--809},
  keywords = {Embedded memories; systems-on-a-chip; online checking; BIST},
  abstract = {This paper presents an integrated approach for both built-in online and offline testing of embedded DRAMs. It is based on a new technique for output data compression which offers the same benefits as signature analysis during offline test, but also supports efficient online consistency checking. The initial fault-free memory contents are compressed to a reference characteristic and compared to test characteristics periodically. The reference characteristic depends on the memory contents, but unlike similar characteristics based on signature analysis, it can be easily updated concurrently with WRITE operations. This way, changes in memory do not require a time consuming recomputation. The respective test characteristics can be efficiently computed during the periodic refresh operations of the dynamic RAM. Experiments show that the proposed technique significantly reduces the time between the occurrence of an error and its detection (error detection latency). Compared to error detecting codes (EDC) it also achieves a significantly higher error coverage at lower hardware costs. Therefore, it perfectly complements standard online checking approaches relying on EDC, where the concurrent detection of certain types of errors is guaranteed, but only during READ operations accessing the erroneous data.},
  url = {http://dl.acm.org/citation.cfm?id=626529.627210},
  doi = {http://dx.doi.org/10.1109/TC.2002.1017700},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2002/ToC_HelleWIKY2002.pdf}
}
95. RESPIN++ - Deterministic Embedded Test
Schäfer, L., Dorsch, R. and Wunderlich, H.-J.
Proceedings of the 7th European Test Workshop (ETW'02), Corfu, Greece, 26-29 May 2002, pp. 37-44
2002
DOI URL PDF 
Abstract: RESPIN++ is a deterministic embedded test method tailored to system chips, which implement scan test at core level. The scan chains of one core of the system-on-a-chip are reused to decompress the patterns for another core. To implement the RESPIN++ test architecture only a few gates need to be added to the test wrapper. This will not affect the critical paths of the system. The RESPIN++ method reduces both test data volume and test application time up to one order of magnitude per core compared to storing compacted test patterns on the ATE. If several cores may be tested concurrently, test data volume and test application time for the complete system test may be reduced even further. This paper presents the RESPIN++ test architecture and a compression algorithm for the architecture.
BibTeX:
@inproceedings{SchaeDW2002,
  author = {Schäfer, Lars and Dorsch, Rainer and Wunderlich, Hans-Joachim},
  title = {{RESPIN++ - Deterministic Embedded Test}},
  booktitle = {Proceedings of the 7th European Test Workshop (ETW'02)},
  publisher = {IEEE Computer Society},
  year = {2002},
  pages = {37--44},
  abstract = {RESPIN++ is a deterministic embedded test method tailored to system chips, which implement scan test at core level. The scan chains of one core of the system-on-a-chip are reused to decompress the patterns for another core. To implement the RESPIN++ test architecture only a few gates need to be added to the test wrapper. This will not affect the critical paths of the system. The RESPIN++ method reduces both test data volume and test application time up to one order of magnitude per core compared to storing compacted test patterns on the ATE. If several cores may be tested concurrently, test data volume and test application time for the complete system test may be reduced even further. This paper presents the RESPIN++ test architecture and a compression algorithm for the architecture.},
  url = {http://www.computer.org/csdl/proceedings/etw/2002/1715/00/17150037-abs.html},
  doi = {http://dx.doi.org/10.1109/ETW.2002.1029637},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2002/ETW_SchaeDW2002.pdf}
}
94. Combining Deterministic Logic BIST with Test Point Insertion
Vranken, H., Meister, F. and Wunderlich, H.-J.
Proceedings of the 7th European Test Workshop (ETW'02), Corfu, Greece, 26-29 May 2002, pp. 105-110
2002
DOI URL PDF 
Abstract: This paper presents a logic BIST approach which combines deterministic logic BIST with test point insertion. Test points are inserted to obtain a first testability improvement, and next a deterministic pattern generator is added to increase the fault efficiency up to 100%. The silicon cell area for the combined approach is smaller than for approaches that apply a deterministic pattern generator or test points only. The combined approach also removes the classical limitations and drawbacks of test point insertion, such as failing to achieve complete fault coverage and a complicated design flow. The benefits of the combined approach are demonstrated in experimental results on a large number of ISCAS and industrial circuits.
BibTeX:
@inproceedings{VrankMW2002,
  author = {Vranken, Harald and Meister, Florian and Wunderlich, Hans-Joachim},
  title = {{Combining Deterministic Logic BIST with Test Point Insertion}},
  booktitle = {Proceedings of the 7th European Test Workshop (ETW'02)},
  publisher = {IEEE Computer Society},
  year = {2002},
  pages = {105--110},
  abstract = {This paper presents a logic BIST approach which combines deterministic logic BIST with test point insertion. Test points are inserted to obtain a first testability improvement, and next a deterministic pattern generator is added to increase the fault efficiency up to 100%. The silicon cell area for the combined approach is smaller than for approaches that apply a deterministic pattern generator or test points only. The combined approach also removes the classical limitations and drawbacks of test point insertion, such as failing to achieve complete fault coverage and a complicated design flow. The benefits of the combined approach are demonstrated in experimental results on a large number of ISCAS and industrial circuits.},
  url = {http://www.computer.org/csdl/proceedings/etw/2002/1715/00/17150105-abs.html},
  doi = {http://dx.doi.org/10.1109/ETW.2002.1029646},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2002/ETW_VrankMW2002.pdf}
}
93. Reusing Scan Chains for Test Pattern Decompression
Dorsch, R. and Wunderlich, H.-J.
Journal of Electronic Testing: Theory and Applications (JETTA)
Vol. 18(2), April 2002, pp. 231-240
2002
DOI URL PDF 
Keywords: system-on-a-chip; embedded test; BIST
Abstract: The paper presents a method for testing a system-on-a-chip by using a compressed representation of the patterns on an external tester. The patterns for a certain core under test are decompressed by reusing scan chains of cores idle during that time. The method only requires a few additional gates in the wrapper, while the mission logic is untouched. Storage and bandwidth requirements for the ATE are reduced significantly.
BibTeX:
@article{DorscW2002,
  author = {Dorsch, Rainer and Wunderlich, Hans-Joachim},
  title = {{Reusing Scan Chains for Test Pattern Decompression}},
  journal = {Journal of Electronic Testing: Theory and Applications (JETTA)},
  publisher = {Springer-Verlag},
  year = {2002},
  volume = {18},
  number = {2},
  pages = {231--240},
  keywords = {system-on-a-chip; embedded test; BIST},
  abstract = {The paper presents a method for testing a system-on-a-chip by using a compressed representation of the patterns on an external tester. The patterns for a certain core under test are decompressed by reusing scan chains of cores idle during that time. The method only requires a few additional gates in the wrapper, while the mission logic is untouched. Storage and bandwidth requirements for the ATE are reduced significantly.},
  url = {http://dl.acm.org/citation.cfm?id=608806.608921},
  doi = {http://dx.doi.org/10.1023/A:1014968930415},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2002/JETTA_DorscW2002.pdf}
}
92. Two-Dimensional Test Data Compression for Scan-Based Deterministic BIST
Liang, H.-G., Hellebrand, S. and Wunderlich, H.-J.
Journal of Electronic Testing: Theory and Applications (JETTA)
Vol. 18(2), April 2002, pp. 159-170
2002
DOI URL PDF 
Keywords: BIST; deterministic BIST; store and generate schemes; test data compression
Abstract: In this paper a novel architecture for scan-based mixed mode BIST is presented. To reduce the storage requirements for the deterministic patterns it relies on a two-dimensional compression scheme, which combines the advantages of known vertical and horizontal compression techniques. To reduce both the number of patterns to be stored and the number of bits to be stored for each pattern, deterministic test cubes are encoded as seeds of an LFSR (horizontal compression), and the seeds are again compressed into seeds of a folding counter sequence (vertical compression). The proposed BIST architecture is fully compatible with standard scan design, simple and flexible, so that sharing between several logic cores is possible. Experimental results show that the proposed scheme requires less test data storage than previously published approaches providing the same flexibility and scan compatibility.
BibTeX:
@article{LiangHW2002,
  author = {Liang, Hua-Guo and Hellebrand, Sybille and Wunderlich, Hans-Joachim},
  title = {{Two-Dimensional Test Data Compression for Scan-Based Deterministic BIST}},
  journal = {Journal of Electronic Testing: Theory and Applications (JETTA)},
  publisher = {Springer-Verlag},
  year = {2002},
  volume = {18},
  number = {2},
  pages = {159--170},
  keywords = {BIST; deterministic BIST; store and generate schemes; test data compression},
  abstract = {In this paper a novel architecture for scan-based mixed mode BIST is presented. To reduce the storage requirements for the deterministic patterns it relies on a two-dimensional compression scheme, which combines the advantages of known vertical and horizontal compression techniques. To reduce both the number of patterns to be stored and the number of bits to be stored for each pattern, deterministic test cubes are encoded as seeds of an LFSR (horizontal compression), and the seeds are again compressed into seeds of a folding counter sequence (vertical compression). The proposed BIST architecture is fully compatible with standard scan design, simple and flexible, so that sharing between several logic cores is possible. Experimental results show that the proposed scheme requires less test data storage than previously published approaches providing the same flexibility and scan compatibility.},
  url = {http://dl.acm.org/citation.cfm?id=608806.608915},
  doi = {http://dx.doi.org/10.1023/A:1014993509806},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2002/JETTA_LiangHW2002.pdf}
}
91. A Mixed-Mode BIST Scheme Based on Folding Compression
Liang, H., Hellebrand, S. and Wunderlich, H.-J.
Journal of Computer Science and Technology
Vol. 17(2), March 2002, pp. 203-212
2002
DOI PDF 
Keywords: BIST; random pattern testing; LFSR; folding set; encoding seed
Abstract: In this paper a new scheme for mixed mode scan-based BIST is presented with complete fault coverage, and some new concepts of folding set and computing are introduced. This scheme applies a single feedback polynomial of an LFSR for generating pseudo-random patterns, as well as for compressing and extending the seeds of folding sets and an LFSR, where we encode the seed of a folding set as an initial seed of the LFSR. Moreover, these new techniques are 100% compatible with scan design. Experimental results show that the proposed scheme outperforms previously published approaches based on the reseeding of LFSRs.
BibTeX:
@article{LiangHW2002a,
  author = {Liang, Huaguo and Hellebrand, Sybille and Wunderlich, Hans-Joachim},
  title = {{A Mixed-Mode BIST Scheme Based on Folding Compression}},
  journal = {Journal of Computer Science and Technology},
  publisher = {Science Press},
  year = {2002},
  volume = {17},
  number = {2},
  pages = {203-212},
  keywords = {BIST; random pattern testing; LFSR; folding set; encoding seed},
  abstract = {In this paper a new scheme for mixed mode scan-based BIST is presented with complete fault coverage, and some new concepts of folding set and computing are introduced. This scheme applies a single feedback polynomial of an LFSR for generating pseudo-random patterns, as well as for compressing and extending the seeds of folding sets and an LFSR, where we encode the seed of a folding set as an initial seed of the LFSR. Moreover, these new techniques are 100% compatible with scan design. Experimental results show that the proposed scheme outperforms previously published approaches based on the reseeding of LFSRs.},
  doi = {http://dx.doi.org/10.1007/BF02962213},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/2002/JCST_LiangHW2002.pdf}
}
90. Tailoring ATPG for Embedded Testing
Dorsch, R. and Wunderlich, H.-J.
Proceedings of the 32nd IEEE International Test Conference (ITC'01), Baltimore, Maryland, USA, 30 October-1 November 2001, pp. 530-537
2001
DOI URL PDF 
Keywords: Test Resource Partitioning; Systems-on-a-Chip; ATPG
Abstract: An automatic test pattern generation (ATPG) method is presented for a scan-based test architecture which minimizes ATE storage requirements and reduces the bandwidth between the automatic test equipment (ATE) and the chip under test. To generate tailored deterministic test patterns, a standard ATPG tool performing dynamic compaction and allowing constraints on circuit inputs is used. The combination of an appropriate test architecture and the tailored test patterns reduces the test data volume up to two orders of magnitude compared with standard compacted test sets.
BibTeX:
@inproceedings{DorscW2001a,
  author = {Dorsch, Rainer and Wunderlich, Hans-Joachim},
  title = {{Tailoring ATPG for Embedded Testing}},
  booktitle = {Proceedings of the 32nd IEEE International Test Conference (ITC'01)},
  publisher = {IEEE Computer Society},
  year = {2001},
  pages = {530--537},
  keywords = {Test Resource Partitioning; Systems-on-a-Chip; ATPG},
  abstract = {An automatic test pattern generation (ATPG) method is presented for a scan-based test architecture which minimizes ATE storage requirements and reduces the bandwidth between the automatic test equipment (ATE) and the chip under test. To generate tailored deterministic test patterns, a standard ATPG tool performing dynamic compaction and allowing constraints on circuit inputs is used. The combination of an appropriate test architecture and the tailored test patterns reduces the test data volume up to two orders of magnitude compared with standard compacted test sets.},
  url = {http://www.computer.org/csdl/proceedings/itc/2001/7171/00/71710530-abs.html},
  doi = {http://dx.doi.org/10.1109/TEST.2001.966671},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2001/ITC_DorscW2001a.pdf}
}
89. Using a Hierarchical DfT Methodology in High Frequency Processor Designs for Improved Delay Fault Testability
Kessler, M., Kiefer, G., Leenstra, J., Schünemann, K., Schwarz, T. and Wunderlich, H.-J.
Proceedings of the 32nd IEEE International Test Conference (ITC'01), Baltimore, Maryland, USA, 30 October-1 November 2001, pp. 461-469
2001
DOI URL PDF 
Keywords: hierarchical; DfT; BIST; testability; scan chain reordering
Abstract: In this paper a novel hierarchical DfT methodology is presented which is targeted to improve the delay fault testability for external testing and scan-based BIST. After the partitioning of the design into high frequency macros, the analysis for delay fault testability already starts in parallel with the implementation at the macro level. A specification is generated for each macro that defines the delay fault testing characteristics at the macro boundaries. This specification is used to analyse and improve the delay fault testability by improving the scan chain ordering at macro-level before the macros are connected together into the total chip network. The hierarchical methodology has been evaluated with the instruction window buffer core of an out-of-order processor. It was shown that for this design practically no extra hardware is required.
BibTeX:
@inproceedings{KesslKLSSW2001,
  author = {Kessler, Michael and Kiefer, Gundolf and Leenstra, Jens and Schünemann, Knut and Schwarz, Thomas and Wunderlich, Hans-Joachim},
  title = {{Using a Hierarchical DfT Methodology in High Frequency Processor Designs for Improved Delay Fault Testability}},
  booktitle = {Proceedings of the 32nd IEEE International Test Conference (ITC'01)},
  publisher = {IEEE Computer Society},
  year = {2001},
  pages = {461--469},
  keywords = {hierarchical; DfT; BIST; testability; scan chain reordering},
  abstract = {In this paper a novel hierarchical DfT methodology is presented which is targeted to improve the delay fault testability for external testing and scan-based BIST. After the partitioning of the design into high frequency macros, the analysis for delay fault testability already starts in parallel with the implementation at the macro level. A specification is generated for each macro that defines the delay fault testing characteristics at the macro boundaries. This specification is used to analyse and improve the delay fault testability by improving the scan chain ordering at macro-level before the macros are connected together into the total chip network. The hierarchical methodology has been evaluated with the instruction window buffer core of an out-of-order processor. It was shown that for this design practically no extra hardware is required.},
  url = {http://www.computer.org/csdl/proceedings/itc/2001/7171/00/71710461-abs.html},
  doi = {http://dx.doi.org/10.1109/TEST.2001.966663},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2001/ITC_KesslKLSSW2001.pdf}
}
88. Two-Dimensional Test Data Compression for Scan-Based Deterministic BIST
Liang, H.-G., Hellebrand, S. and Wunderlich, H.-J.
Proceedings of the 32nd IEEE International Test Conference (ITC'01), Baltimore, Maryland, USA, 30 October-1 November 2001, pp. 894-902
2001
DOI URL PDF 
Abstract: In this paper a novel architecture for scan-based mixed mode BIST is presented. To reduce the storage requirements for the deterministic patterns it relies on a two-dimensional compression scheme, which combines the advantages of known vertical and horizontal compression techniques. To reduce both the number of patterns to be stored and the number of bits to be stored for each pattern, deterministic test cubes are encoded as seeds of an LFSR (horizontal compression), and the seeds are again compressed into seeds of a folding counter sequence (vertical compression). The proposed BIST architecture is fully compatible with standard scan design, simple and flexible, so that sharing between several logic cores is possible. Experimental results show that the proposed scheme requires less test data storage than previously published approaches providing the same flexibility and scan compatibility.
BibTeX:
@inproceedings{LiangHW2001,
  author = {Liang, Hua-Guo and Hellebrand, Sybille and Wunderlich, Hans-Joachim},
  title = {{Two-Dimensional Test Data Compression for Scan-Based Deterministic BIST}},
  booktitle = {Proceedings of the 32nd IEEE International Test Conference (ITC'01)},
  publisher = {IEEE Computer Society},
  year = {2001},
  pages = {894--902},
  abstract = {In this paper a novel architecture for scan-based mixed mode BIST is presented. To reduce the storage requirements for the deterministic patterns it relies on a two-dimensional compression scheme, which combines the advantages of known vertical and horizontal compression techniques. To reduce both the number of patterns to be stored and the number of bits to be stored for each pattern, deterministic test cubes are encoded as seeds of an LFSR (horizontal compression), and the seeds are again compressed into seeds of a folding counter sequence (vertical compression). The proposed BIST architecture is fully compatible with standard scan design, simple and flexible, so that sharing between several logic cores is possible. Experimental results show that the proposed scheme requires less test data storage than previously published approaches providing the same flexibility and scan compatibility.},
  url = {http://www.computer.org/csdl/proceedings/itc/2001/7171/00/71710894-abs.html},
  doi = {http://dx.doi.org/10.1109/TEST.2001.966712},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2001/ITC_LiangHW2001.pdf}
}
87. A Mixed Mode BIST Scheme Based on Reseeding of Folding Counters
Hellebrand, S., Liang, H.-G. and Wunderlich, H.-J.
Journal of Electronic Testing: Theory and Applications (JETTA)
Vol. 17(3-4), June 2001, pp. 341-349
2001
DOI URL PDF 
Keywords: BIST; deterministic BIST; store and generate schemes
Abstract: In this paper a new scheme for deterministic and mixed mode scan-based BIST is presented. It relies on a new type of test pattern generator which resembles a programmable Johnson counter and is called folding counter. Both the theoretical background and practical algorithms are presented to characterize a set of deterministic test cubes by a reasonably small number of seeds for a folding counter. Combined with classical techniques for test width compression and with pseudo-random pattern generation these new techniques provide an efficient and flexible solution for scan-based BIST. Experimental results show that the proposed scheme outperforms previously published approaches based on the reseeding of LFSRs or Johnson counters.
BibTeX:
@article{HelleLW2001,
  author = {Hellebrand, Sybille and Liang, Hua-Guo and Wunderlich, Hans-Joachim},
  title = {{A Mixed Mode BIST Scheme Based on Reseeding of Folding Counters}},
  journal = {Journal of Electronic Testing: Theory and Applications (JETTA)},
  publisher = {Springer-Verlag},
  year = {2001},
  volume = {17},
  number = {3-4},
  pages = {341--349},
  keywords = {BIST; deterministic BIST; store and generate schemes},
  abstract = {In this paper a new scheme for deterministic and mixed mode scan-based BIST is presented. It relies on a new type of test pattern generator which resembles a programmable Johnson counter and is called folding counter. Both the theoretical background and practical algorithms are presented to characterize a set of deterministic test cubes by a reasonably small number of seeds for a folding counter. Combined with classical techniques for test width compression and with pseudo-random pattern generation these new techniques provide an efficient and flexible solution for scan-based BIST. Experimental results show that the proposed scheme outperforms previously published approaches based on the reseeding of LFSRs or Johnson counters.},
  url = {http://dl.acm.org/citation.cfm?id=608802.608876},
  doi = {http://dx.doi.org/10.1023/A:1012279716236},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2001/JETTA_HelleLW2001.pdf}
}
86. Application of Deterministic Logic BIST on Industrial Circuits
Kiefer, G., Vranken, H., Marinissen, E.J. and Wunderlich, H.-J.
Journal of Electronic Testing: Theory and Applications (JETTA)
Vol. 17(3-4), June 2001, pp. 351-362
2001
DOI URL PDF 
Keywords: logic BIST; industrial applications; scan-based BIST
Abstract: We present the application of a deterministic logic BIST scheme based on bit-flipping on state-of-the-art industrial circuits. Experimental results show that complete fault coverage can be achieved for industrial circuits up to 100 K gates with 10,000 test patterns, at a total area cost for BIST hardware of typically 5%-15%. It is demonstrated that a trade-off is possible between test quality, test time, and silicon area. In contrast to BIST schemes based on test point insertion no modifications of the circuit under test are required, complete fault efficiency is guaranteed, and the impact on the design process is minimized.
BibTeX:
@article{KiefeVMW2001,
  author = {Kiefer, Gundolf and Vranken, Harald and Marinissen, Erik Jan and Wunderlich, Hans-Joachim},
  title = {{Application of Deterministic Logic BIST on Industrial Circuits}},
  journal = {Journal of Electronic Testing: Theory and Applications (JETTA)},
  publisher = {Springer-Verlag},
  year = {2001},
  volume = {17},
  number = {3-4},
  pages = {351--362},
  keywords = {logic BIST; industrial applications; scan-based BIST},
  abstract = {We present the application of a deterministic logic BIST scheme based on bit-flipping on state-of-the-art industrial circuits. Experimental results show that complete fault coverage can be achieved for industrial circuits up to 100 K gates with 10,000 test patterns, at a total area cost for BIST hardware of typically 5%-15%. It is demonstrated that a trade-off is possible between test quality, test time, and silicon area. In contrast to BIST schemes based on test point insertion no modifications of the circuit under test are required, complete fault efficiency is guaranteed, and the impact on the design process is minimized.},
  url = {http://dl.acm.org/citation.cfm?id=608802.608877},
  doi = {http://dx.doi.org/10.1023/A:1012283800306},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2001/JETTA_KiefeVMW2001.pdf}
}
85. Reusing Scan Chains for Test Pattern Decompression
Dorsch, R. and Wunderlich, H.-J.
Proceedings of the 6th European Test Workshop (ETW'01), Stockholm, Sweden, 29 May-1 June 2001, pp. 124-132
2001
DOI PDF 
Keywords: system-on-a-chip; embedded test; BIST
Abstract: The paper presents a method for testing a system-on-a-chip by using a compressed representation of the patterns on an external tester. The patterns for a certain core under test are decompressed by reusing scan chains of cores idle during that time. The method only requires a few additional gates in the wrapper, while the mission logic is untouched. Storage and bandwidth requirements for the ATE are reduced significantly.
BibTeX:
@inproceedings{DorscW2001,
  author = {Dorsch, Rainer and Wunderlich, Hans-Joachim},
  title = {{Reusing Scan Chains for Test Pattern Decompression}},
  booktitle = {Proceedings of the 6th European Test Workshop (ETW'01)},
  publisher = {IEEE Computer Society},
  year = {2001},
  pages = {124--132},
  keywords = {system-on-a-chip; embedded test; BIST},
  abstract = {The paper presents a method for testing a system-on-a-chip by using a compressed representation of the patterns on an external tester. The patterns for a certain core under test are decompressed by reusing scan chains of cores idle during that time. The method only requires a few additional gates in the wrapper, while the mission logic is untouched. Storage and bandwidth requirements for the ATE are reduced significantly.},
  doi = {http://dx.doi.org/10.1109/ETW.2001.946677},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2001/ETW_DorscW2001.pdf}
}
84. A Modified Clock Scheme for a Low Power BIST Test Pattern Generator
Girard, P., Guiller, L., Landrault, C., Pravossoudovitch, S. and Wunderlich, H.-J.
Proceedings of the 19th VLSI Test Symposium (VTS'01), Marina Del Rey, California, USA, 29 April-3 May 2001, pp. 306-311
2001
DOI URL PDF 
Keywords: Parallel BIST, Low-power Design, Test & Low Power, Low Power BIST
Abstract: In this paper, we present a new low power BIST test pattern generator that provides test vectors which can reduce the switching activity during test operation. The proposed low power/energy BIST technique is based on a modified clock scheme for the TPG and the clock tree feeding the TPG. Numerous advantages can be found in applying such a technique. The fault coverage and the test time are roughly the same as those achieved using a standard BIST scheme. The area overhead is nearly negligible and there is no penalty on the circuit delay. The proposed BIST scheme does not require any circuit design modification beyond the parallel BIST technique, is easily implemented and has low impact on the design time. It has been implemented based on an LFSR-based TPG, but can also be designed using a cellular automaton. Reductions of the energy, average power and peak power consumption during test operation are up to 94%, 55% and 48%, respectively, for ISCAS and MCNC benchmark circuits.
BibTeX:
@inproceedings{GirarGLPW2001,
  author = {Girard, Patrick and Guiller, Lois and Landrault, Christian and Pravossoudovitch, Serge and Wunderlich, Hans-Joachim},
  title = {{A Modified Clock Scheme for a Low Power BIST Test Pattern Generator}},
  booktitle = {Proceedings of the 19th VLSI Test Symposium (VTS'01)},
  publisher = {IEEE Computer Society},
  year = {2001},
  pages = {306--311},
  keywords = {Parallel BIST, Low-power Design, Test & Low Power, Low Power BIST},
  abstract = {In this paper, we present a new low power BIST test pattern generator that provides test vectors which can reduce the switching activity during test operation. The proposed low power/energy BIST technique is based on a modified clock scheme for the TPG and the clock tree feeding the TPG. Numerous advantages can be found in applying such a technique. The fault coverage and the test time are roughly the same as those achieved using a standard BIST scheme. The area overhead is nearly negligible and there is no penalty on the circuit delay. The proposed BIST scheme does not require any circuit design modification beyond the parallel BIST technique, is easily implemented and has low impact on the design time. It has been implemented based on an LFSR-based TPG, but can also be designed using a cellular automaton. Reductions of the energy, average power and peak power consumption during test operation are up to 94%, 55% and 48%, respectively, for ISCAS and MCNC benchmark circuits.},
  url = {http://www.computer.org/csdl/proceedings/vts/2001/1122/00/11220306-abs.html},
  doi = {http://dx.doi.org/10.1109/VTS.2001.923454},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2001/VTS_GirarGLPW2001.pdf}
}
83. On Applying the Set Covering Model to Reseeding
Chiusano, S., Di Carlo, S., Prinetto, P. and Wunderlich, H.-J.
Proceedings of the 4th Conference on Design, Automation and Test in Europe (DATE'01), Munich, Germany, 13-16 March 2001, pp. 156-160
2001
DOI URL PDF 
Keywords: built-in self test; computational complexity; encoding; integrated circuit testing
Abstract: The Functional BIST approach is a rather new BIST technique based on exploiting embedded system functionality to generate deterministic test patterns during BIST. The approach takes advantage of two well-known testing techniques, the arithmetic BIST approach and the reseeding method.
The main contribution of the present paper consists in formulating the problem of an optimal reseeding computation as an instance of the set covering problem. The proposed approach guarantees high flexibility, is applicable to different functional modules, and, in general, provides a more efficient test set encoding than previous techniques. In addition, the approach shortens the computation time, allows a better exploitation of the tradeoff between area overhead and global test length, and is able to deal with larger circuits.
BibTeX:
@inproceedings{ChiusDPW2001,
  author = {Chiusano, Silvia and Di Carlo, Stefano and Prinetto, Paolo and Wunderlich, Hans-Joachim},
  title = {{On Applying the Set Covering Model to Reseeding}},
  booktitle = {Proceedings of the 4th Conference on Design, Automation and Test in Europe (DATE'01)},
  publisher = {IEEE Computer Society},
  year = {2001},
  pages = {156--160},
  keywords = {built-in self test; computational complexity; encoding; integrated circuit testing},
  abstract = {The Functional BIST approach is a rather new BIST technique based on exploiting embedded system functionality to generate deterministic test patterns during BIST. The approach takes advantage of two well-known testing techniques, the arithmetic BIST approach and the reseeding method.
The main contribution of the present paper consists in formulating the problem of an optimal reseeding computation as an instance of the set covering problem. The proposed approach guarantees high flexibility, is applicable to different functional modules, and, in general, provides a more efficient test set encoding than previous techniques. In addition, the approach shortens the computation time, allows a better exploitation of the tradeoff between area overhead and global test length, and is able to deal with larger circuits.},
  url = {http://www.computer.org/csdl/proceedings/date/2001/0993/00/09930156-abs.html},
  doi = {http://dx.doi.org/10.1109/DATE.2001.915017},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2001/DATE_ChiusCPW2001.pdf}
}
82. Circuit Partitioning for Efficient Logic BIST Synthesis
Irion, A., Kiefer, G., Vranken, H. and Wunderlich, H.-J.
Proceedings of the 4th Conference on Design, Automation and Test in Europe (DATE'01), Munich, Germany, 13-16 March 2001, pp. 86-91
2001
DOI URL PDF 
Keywords: circuit partitioning; deterministic BIST; divide-and-conquer
Abstract: A divide-and-conquer approach using circuit partitioning is presented, which can be used to accelerate logic BIST synthesis procedures. Many BIST synthesis algorithms contain steps with a time complexity which increases more than linearly with the circuit size. By extracting sub-circuits which are almost constant in size, BIST synthesis for very large designs may be possible within linear time. The partitioning approach does not require any physical modifications of the circuit under test. Experiments show that significant performance improvements can be obtained at the cost of a longer test application time or a slight increase in silicon area for the BIST hardware.
BibTeX:
@inproceedings{IrionKVW2001,
  author = {Irion, Alexander and Kiefer, Gundolf and Vranken, Harald and Wunderlich, Hans-Joachim},
  title = {{Circuit Partitioning for Efficient Logic BIST Synthesis}},
  booktitle = {Proceedings of the 4th Conference on Design, Automation and Test in Europe (DATE'01)},
  publisher = {IEEE Computer Society},
  year = {2001},
  pages = {86--91},
  keywords = {circuit partitioning; deterministic BIST; divide-and-conquer},
  abstract = {A divide-and-conquer approach using circuit partitioning is presented, which can be used to accelerate logic BIST synthesis procedures. Many BIST synthesis algorithms contain steps with a time complexity which increases more than linearly with the circuit size. By extracting sub-circuits which are almost constant in size, BIST synthesis for very large designs may be possible within linear time. The partitioning approach does not require any physical modifications of the circuit under test. Experiments show that significant performance improvements can be obtained at the cost of a longer test application time or a slight increase in silicon area for the BIST hardware.},
  url = {http://www.computer.org/csdl/proceedings/date/2001/0993/00/09930086-abs.html},
  doi = {http://dx.doi.org/10.1109/DATE.2001.915005},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2001/DATE_IrionKVW2001.pdf}
}
81. Non-Intrusive BIST for Systems-on-a-Chip
Chiusano, S., Prinetto, P. and Wunderlich, H.-J.
Proceedings of the 31st IEEE International Test Conference (ITC'00), Atlantic City, New Jersey, USA, 3-5 October 2000, pp. 644-651
2000
DOI URL PDF 
Abstract: The term functional BIST describes a test method to control functional modules so that they generate a deterministic test set, which targets structural faults within other parts of the system. It is a promising solution for self-testing complex digital systems at reduced costs in terms of area overhead and performance degradation. While previous work mainly investigated the use of functional modules for generating pseudo-random and pseudo-exhaustive test patterns, the present paper shows that a variety of modules can also be used as a deterministic test pattern generator via an appropriate reseeding strategy. This method enables a BIST technique that does not introduce additional hardware like test points and test registers into combinational and pipelined modules under test. The experimental results prove that the reseeding method works for accumulator based structures, multipliers, or encryption modules as efficiently as for the classic linear feedback shift registers, and sometimes even better.
BibTeX:
@inproceedings{ChiusPW2000,
  author = {Chiusano, Silvia and Prinetto, Paolo and Wunderlich, Hans-Joachim},
  title = {{Non-Intrusive BIST for Systems-on-a-Chip}},
  booktitle = {Proceedings of the 31st IEEE International Test Conference (ITC'00)},
  publisher = {IEEE Computer Society},
  year = {2000},
  pages = {644--651},
  abstract = {The term functional BIST describes a test method to control functional modules so that they generate a deterministic test set, which targets structural faults within other parts of the system. It is a promising solution for self-testing complex digital systems at reduced costs in terms of area overhead and performance degradation. While previous work mainly investigated the use of functional modules for generating pseudo-random and pseudo-exhaustive test patterns, the present paper shows that a variety of modules can also be used as a deterministic test pattern generator via an appropriate reseeding strategy. This method enables a BIST technique that does not introduce additional hardware like test points and test registers into combinational and pipelined modules under test. The experimental results prove that the reseeding method works for accumulator based structures, multipliers, or encryption modules as efficiently as for the classic linear feedback shift registers, and sometimes even better.},
  url = {http://www.computer.org/csdl/proceedings/itc/2000/6547/00/65470644-abs.html},
  doi = {http://dx.doi.org/10.1109/TEST.2000.894259},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2000/ITC_ChiusPW2000.pdf}
}
80. A Mixed Mode BIST Scheme Based on Reseeding of Folding Counters
Hellebrand, S., Liang, H.-G. and Wunderlich, H.-J.
Proceedings of the 31st IEEE International Test Conference (ITC'00), Atlantic City, New Jersey, USA, 3-5 October 2000, pp. 778-784
2000
DOI URL PDF 
Abstract: In this paper a new scheme for deterministic and mixed mode scan-based BIST is presented. It relies on a new type of test pattern generator which resembles a programmable Johnson counter and is called folding counter. Both the theoretical background and practical algorithms are presented to characterize a set of deterministic test cubes by a reasonably small number of seeds for a folding counter. Combined with classical techniques for test width compression and with pseudo-random pattern generation these new techniques provide an efficient and flexible solution for scan-based BIST. Experimental results show that the proposed scheme outperforms previously published approaches based on the reseeding of LFSRs or Johnson counters.
BibTeX:
@inproceedings{HelleLW2000,
  author = {Hellebrand, Sybille and Liang, Hua-Guo and Wunderlich, Hans-Joachim},
  title = {{A Mixed Mode BIST Scheme Based on Reseeding of Folding Counters}},
  booktitle = {Proceedings of the 31st IEEE International Test Conference (ITC'00)},
  publisher = {IEEE Computer Society},
  year = {2000},
  pages = {778--784},
  abstract = {In this paper a new scheme for deterministic and mixed mode scan-based BIST is presented. It relies on a new type of test pattern generator which resembles a programmable Johnson counter and is called folding counter. Both the theoretical background and practical algorithms are presented to characterize a set of deterministic test cubes by a reasonably small number of seeds for a folding counter. Combined with classical techniques for test width compression and with pseudo-random pattern generation these new techniques provide an efficient and flexible solution for scan-based BIST. Experimental results show that the proposed scheme outperforms previously published approaches based on the reseeding of LFSRs or Johnson counters.},
  url = {http://www.computer.org/csdl/proceedings/itc/2000/6547/00/65470778-abs.html},
  doi = {http://dx.doi.org/10.1109/TEST.2000.894274},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2000/ITC_HelleLW2000.pdf}
}
79. Application of Deterministic Logic BIST on Industrial Circuits
Kiefer, G., Vranken, H., Marinissen, E.J. and Wunderlich, H.-J.
Proceedings of the 31st IEEE International Test Conference (ITC'00), Atlantic City, New Jersey, USA, 3-5 October 2000, pp. 105-114
2000
DOI URL PDF 
Abstract: We present the application of a deterministic logic BIST scheme on state-of-the-art industrial circuits. Experimental results show that complete fault coverage can be achieved for industrial circuits up to 100K gates with 10,000 test patterns, at a total area cost for BIST hardware of typically 515. It is demonstrated that a trade-off is possible between test quality, test time, and silicon area. In contrast to BIST schemes based on test point insertion no modifications of the circuit under test are required, complete fault efficiency is guaranteed, and the impact on the design process is minimized.
BibTeX:
@inproceedings{KiefeVMW2000,
  author = {Kiefer, Gundolf and Vranken, Harald and Marinissen, Erik Jan and Wunderlich, Hans-Joachim},
  title = {{Application of Deterministic Logic BIST on Industrial Circuits}},
  booktitle = {Proceedings of the 31st IEEE International Test Conference (ITC'00)},
  publisher = {IEEE Computer Society},
  year = {2000},
  pages = {105--114},
  abstract = {We present the application of a deterministic logic BIST scheme on state-of-the-art industrial circuits. Experimental results show that complete fault coverage can be achieved for industrial circuits up to 100K gates with 10,000 test patterns, at a total area cost for BIST hardware of typically 515. It is demonstrated that a trade-off is possible between test quality, test time, and silicon area. In contrast to BIST schemes based on test point insertion no modifications of the circuit under test are required, complete fault efficiency is guaranteed, and the impact on the design process is minimized.},
  url = {http://www.computer.org/csdl/proceedings/itc/2000/6547/00/65470105-abs.html},
  doi = {http://dx.doi.org/10.1109/TEST.2000.894197},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2000/ITC_KiefeVMW2000.pdf}
}
78. Minimized Power Consumption for Scan-Based BIST
Gerstendörfer, S. and Wunderlich, H.-J.
Journal of Electronic Testing: Theory and Applications (JETTA)
Vol. 16(3), June 2000, pp. 203-212
2000
DOI URL PDF 
Keywords: deterministic scan-based BIST; partial scan; Low Power; Power consumption
Abstract: Power consumption of digital systems may increase significantly during testing. In this paper, systems equipped with a scan-based built-in self-test like the STUMPS architecture are analyzed, the modules and modes with the highest power consumption are identified, and design modifications to reduce power consumption are proposed. The design modifications include some gating logic for masking the scan path activity during shifting, and the synthesis of additional logic for suppressing random patterns which do not contribute to increase the fault coverage. These design changes reduce power consumption during BIST by several orders of magnitude, at very low cost in terms of area and performance.
BibTeX:
@article{GerstW2000,
  author = {Gerstendörfer, Stefan and Wunderlich, Hans-Joachim},
  title = {{Minimized Power Consumption for Scan-Based BIST}},
  journal = {Journal of Electronic Testing: Theory and Applications (JETTA)},
  publisher = {Springer-Verlag},
  year = {2000},
  volume = {16},
  number = {3},
  pages = {203--212},
  keywords = {deterministic scan-based BIST; partial scan; Low Power; Power consumption},
  abstract = {Power consumption of digital systems may increase significantly during testing. In this paper, systems equipped with a scan-based built-in self-test like the STUMPS architecture are analyzed, the modules and modes with the highest power consumption are identified, and design modifications to reduce power consumption are proposed. The design modifications include some gating logic for masking the scan path activity during shifting, and the synthesis of additional logic for suppressing random patterns which do not contribute to increase the fault coverage. These design changes reduce power consumption during BIST by several orders of magnitude, at very low cost in terms of area and performance.},
  url = {http://dl.acm.org/citation.cfm?id=348417.348436},
  doi = {http://dx.doi.org/10.1023/A:1008383013319},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2000/JETTA_GerstW2000.pdf}
}
77. Deterministic BIST with Partial Scan
Kiefer, G. and Wunderlich, H.-J.
Journal of Electronic Testing: Theory and Applications (JETTA)
Vol. 16(3), June 2000, pp. 169-177
2000
DOI URL PDF 
Keywords: deterministic scan-based BIST; partial scan
Abstract: An efficient deterministic BIST scheme based on partial scan chains together with a scan selection algorithm tailored for BIST is presented. The algorithm determines a minimum number of flipflops to be scannable so that the remaining circuit has a pipeline-like structure. Experiments show that scanning fewer flipflops may even decrease the hardware overhead for the on-chip pattern generator besides the classical advantages of partial scan such as less impact on the system performance and less hardware overhead.
BibTeX:
@article{KiefeW2000,
  author = {Kiefer, Gundolf and Wunderlich, Hans-Joachim},
  title = {{Deterministic BIST with Partial Scan}},
  journal = {Journal of Electronic Testing: Theory and Applications (JETTA)},
  publisher = {Springer-Verlag},
  year = {2000},
  volume = {16},
  number = {3},
  pages = {169--177},
  keywords = {deterministic scan-based BIST; partial scan},
  abstract = {An efficient deterministic BIST scheme based on partial scan chains together with a scan selection algorithm tailored for BIST is presented. The algorithm determines a minimum number of flipflops to be scannable so that the remaining circuit has a pipeline-like structure. Experiments show that scanning fewer flipflops may even decrease the hardware overhead for the on-chip pattern generator besides the classical advantages of partial scan such as less impact on the system performance and less hardware overhead.},
  url = {http://dl.acm.org/citation.cfm?id=348417.348421},
  doi = {http://dx.doi.org/10.1023/A:1008374811502},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2000/JETTA_KiefeW2000.pdf}
}
76. Optimal Hardware Pattern Generation for Functional BIST
Cataldo, S., Chiusano, S., Prinetto, P. and Wunderlich, H.-J.
Proceedings of the 3rd Conference on Design, Automation and Test in Europe (DATE'00), Paris, France, 27-30 March 2000, pp. 292-297
2000
DOI URL PDF 
Abstract: Functional BIST is a promising solution for self-testing complex digital systems at reduced costs in terms of area and performance degradation. The present paper addresses the computation of optimal seeds for an arbitrary sequential module to be used as hardware test pattern generator. Up to now, only linear feedback shift registers and accumulator based structures have been used for deterministic test pattern generation by reseeding. In this paper, a method is proposed which can be applied to general finite state machines. Although the method is completely general, an accumulator based unit is assumed as pattern generator module in this paper for the sake of comparison with previous approaches. Experiments prove the effectiveness of the approach, which outperforms previous results for accumulators in terms of test size and test time, without sacrificing the fault detection capability.
BibTeX:
@inproceedings{CatalCPW2000,
  author = {Cataldo, Silvia and Chiusano, Silvia and Prinetto, Paolo and Wunderlich, Hans-Joachim},
  title = {{Optimal Hardware Pattern Generation for Functional BIST}},
  booktitle = {Proceedings of the 3rd Conference on Design, Automation and Test in Europe (DATE'00)},
  publisher = {IEEE Computer Society},
  year = {2000},
  pages = {292--297},
  abstract = {Functional BIST is a promising solution for self-testing complex digital systems at reduced costs in terms of area and performance degradation. The present paper addresses the computation of optimal seeds for an arbitrary sequential module to be used as hardware test pattern generator. Up to now, only linear feedback shift registers and accumulator based structures have been used for deterministic test pattern generation by reseeding. In this paper, a method is proposed which can be applied to general finite state machines. Although the method is completely general, an accumulator based unit is assumed as pattern generator module in this paper for the sake of comparison with previous approaches. Experiments prove the effectiveness of the approach, which outperforms previous results for accumulators in terms of test size and test time, without sacrificing the fault detection capability.},
  url = {http://www.computer.org/csdl/proceedings/date/2000/0537/00/05370292-abs.html},
  doi = {http://dx.doi.org/10.1109/DATE.2000.840286},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/2000/DATE_CatalCPW2000.pdf}
}
75. Minimized Power Consumption for Scan-Based BIST
Gerstendörfer, S. and Wunderlich, H.-J.
Proceedings of the 30th IEEE International Test Conference (ITC'99), Atlantic City, New Jersey, USA, 28-30 September 1999, pp. 77-84
1999
DOI URL PDF 
Keywords: BIST; Low Power; Power consumption
Abstract: Power consumption of digital systems may increase significantly during testing. In this paper, systems equipped with a scan-based built-in self-test like the STUMPS architecture are analyzed, the modules and modes with the highest power consumption are identified, and design modifications to reduce power consumption are proposed. The design modifications include some gating logic for masking the scan path activity during shifting, and the synthesis of additional logic for suppressing random patterns which do not contribute to increase the fault coverage. These design changes reduce power consumption during BIST by several orders of magnitude, at very low cost in terms of area and performance.
BibTeX:
@inproceedings{GerstW1999,
  author = {Gerstendörfer, Stefan and Wunderlich, Hans-Joachim},
  title = {{Minimized Power Consumption for Scan-Based BIST}},
  booktitle = {Proceedings of the 30th IEEE International Test Conference (ITC'99)},
  publisher = {IEEE Computer Society},
  year = {1999},
  pages = {77--84},
  keywords = {BIST; Low Power; Power consumption},
  abstract = {Power consumption of digital systems may increase significantly during testing. In this paper, systems equipped with a scan-based built-in self-test like the STUMPS architecture are analyzed, the modules and modes with the highest power consumption are identified, and design modifications to reduce power consumption are proposed. The design modifications include some gating logic for masking the scan path activity during shifting, and the synthesis of additional logic for suppressing random patterns which do not contribute to increase the fault coverage. These design changes reduce power consumption during BIST by several orders of magnitude, at very low cost in terms of area and performance.},
  url = {http://www.computer.org/csdl/proceedings/itc/1999/5753/00/57530077-abs.html},
  doi = {http://dx.doi.org/10.1109/TEST.1999.805616},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/1999/ITC_GerstW1999.pdf}
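The effect of the gating logic described in the scan-BIST power reduction entries above can be illustrated with a toy activity count: during shifting, gating holds the scan cell outputs at a constant value, so the combinational logic only sees transitions when a complete pattern is applied. The NOR-style gating to zero, the chain length, and the simple transition metric below are illustrative assumptions, not the paper's experimental setup.

import random

def shift_activity(patterns, chain_len, gated):
    # Counts transitions seen by the combinational logic inputs while test
    # patterns are shifted through a single scan chain. With gating enabled,
    # the scan cell outputs are held at 0 during shifting, so the logic only
    # sees one transition per applied pattern instead of one per shift clock.
    logic_inputs = [0] * chain_len
    chain = [0] * chain_len
    toggles = 0
    for pattern in patterns:
        for bit in pattern:                     # shift phase, chain_len clocks
            chain = [bit] + chain[:-1]
            visible = [0] * chain_len if gated else chain
            toggles += sum(a != b for a, b in zip(visible, logic_inputs))
            logic_inputs = visible
        # capture/apply phase: the complete pattern is released to the logic
        toggles += sum(a != b for a, b in zip(chain, logic_inputs))
        logic_inputs = chain[:]
    return toggles

if __name__ == "__main__":
    random.seed(1)
    pats = [[random.randint(0, 1) for _ in range(32)] for _ in range(100)]
    print("ungated:", shift_activity(pats, 32, gated=False))
    print("gated:  ", shift_activity(pats, 32, gated=True))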
}
74. Transparent Word-oriented Memory BIST Based on Symmetric March Algorithms
Yarmolik, V.N., Bykov, I., Hellebrand, S. and Wunderlich, H.-J.
Proceedings of the 3rd European Dependable Computing Conference (EDCC-3)
Vol. 1667, Prague, Czech Republic, 15-17 September 1999, pp. 339-350
1999
DOI URL PDF 
Abstract: The paper presents a new approach to transparent BIST for word-oriented RAMs which is based on the transformation of March transparent test algorithms to the symmetric versions. This approach makes it possible to skip the signature prediction phase inherent to conventional transparent memory testing and therefore to significantly reduce test time. The hardware overhead and fault coverage of the new BIST scheme are comparable to the conventional transparent BIST structures. Experimental results show that in many cases the proposed test techniques achieve a higher fault coverage in shorter test time.
BibTeX:
@inproceedings{YarmoBHW1999,
  author = {Yarmolik, Vyacheslav N. and Bykov, I.V. and Hellebrand, Sybille and Wunderlich, Hans-Joachim},
  title = {{Transparent Word-oriented Memory BIST Based on Symmetric March Algorithms}},
  booktitle = {Proceedings of the 3rd European Dependable Computing Conference (EDCC-3)},
  publisher = {Springer-Verlag},
  year = {1999},
  volume = {1667},
  pages = {339--350},
  abstract = {The paper presents a new approach to transparent BIST for word-oriented RAMs which is based on the transformation of March transparent test algorithms to the symmetric versions. This approach makes it possible to skip the signature prediction phase inherent to conventional transparent memory testing and therefore to significantly reduce test time. The hardware overhead and fault coverage of the new BIST scheme are comparable to the conventional transparent BIST structures. Experimental results show that in many cases the proposed test techniques achieve a higher fault coverage in shorter test time.},
  url = {http://dl.acm.org/citation.cfm?id=645332.649825},
  doi = {http://dx.doi.org/10.1007/3-540-48254-7_23},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/1999/EDCC_YarmoBHW1999.pdf}
}
73. Deterministic BIST with Partial Scan
Kiefer, G. and Wunderlich, H.-J.
Proceedings of the 4th IEEE European Test Workshop (ETW'99), Constance, Germany, 25-28 May 1999, pp. 110-117
1999
DOI URL PDF 
Keywords: deterministic BIST; partial scan
Abstract: An efficient deterministic BIST scheme based on partial scan chains together with a scan selection algorithm tailored for BIST is presented. The algorithm determines a minimum number of flipflops to be scannable so that the remaining circuit has a pipeline-like structure. Experiments show that scanning fewer flipflops may even decrease the hardware overhead for the on-chip pattern generator besides the classical advantages of partial scan such as less impact on the system performance and less hardware overhead.
BibTeX:
@inproceedings{KiefeW1999a,
  author = {Kiefer, Gundolf and Wunderlich, Hans-Joachim},
  title = {{Deterministic BIST with Partial Scan}},
  booktitle = {Proceedings of the 4th IEEE European Test Workshop (ETW'99)},
  publisher = {IEEE Computer Society},
  year = {1999},
  pages = {110--117},
  keywords = {deterministic BIST; partial scan},
  abstract = {An efficient deterministic BIST scheme based on partial scan chains together with a scan selection algorithm tailored for BIST is presented. The algorithm determines a minimum number of flipflops to be scannable so that the remaining circuit has a pipeline-like structure. Experiments show that scanning fewer flipflops may even decrease the hardware overhead for the on-chip pattern generator besides the classical advantages of partial scan such as less impact on the system performance and less hardware overhead.},
  url = {http://www.computer.org/csdl/proceedings/etw/1999/0390/00/03900110-abs.html},
  doi = {http://dx.doi.org/10.1109/ETW.1999.804415},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/1999/ETW_KiefeW1999a.pdf}
}
72. Error Detecting Refreshment for Embedded DRAMs
Hellebrand, S., Wunderlich, H.-J., Ivaniuk, A., Klimets, Y. and Yarmolik, V.N.
Proceedings of the 17th IEEE VLSI Test Symposium (VTS'99), Dana Point, California, USA, 25-29 April 1999, pp. 384-390
1999
DOI URL PDF 
Abstract: This paper presents a new technique for on-line consistency checking of embedded DRAMs. The basic idea is to use the refresh cycle for concurrently computing a test characteristic of the memory contents and compare it to a precomputed reference characteristic. Experiments show that the proposed technique significantly reduces the time between the occurrence of an error and its detection (error detection latency). It also achieves a very high error coverage at low hardware costs. Therefore it perfectly complements standard on-line checking approaches relying on error detecting codes, where the detection of certain types of errors is guaranteed, but only during READ operations accessing the erroneous data.
BibTeX:
@inproceedings{HelleWIKY1999,
  author = {Hellebrand, Sybille and Wunderlich, Hans-Joachim and Ivaniuk, Alexander and Klimets, Yuri and Yarmolik, Vyacheslav N.},
  title = {{Error Detecting Refreshment for Embedded DRAMs}},
  booktitle = {Proceedings of the 17th IEEE VLSI Test Symposium (VTS'99)},
  publisher = {IEEE Computer Society},
  year = {1999},
  pages = {384--390},
  abstract = {This paper presents a new technique for on-line consistency checking of embedded DRAMs. The basic idea is to use the refresh cycle for concurrently computing a test characteristic of the memory contents and compare it to a precomputed reference characteristic. Experiments show that the proposed technique significantly reduces the time between the occurrence of an error and its detection (error detection latency). It also achieves a very high error coverage at low hardware costs. Therefore it perfectly complements standard on-line checking approaches relying on error detecting codes, where the detection of certain types of errors is guaranteed, but only during READ operations accessing the erroneous data.},
  url = {http://www.computer.org/csdl/proceedings/vts/1999/0146/00/01460384-abs.html},
  doi = {http://dx.doi.org/10.1109/VTEST.1999.766693},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/1999/VTS_HelleWIKY1999.pdf}
}
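A toy model of the refresh-based checking idea from the entry above: a reference characteristic is kept up to date on every legitimate write, and each refresh sweep recomputes the characteristic from the actual memory contents, so a transient error is flagged at the end of the next sweep rather than only at the next READ of the corrupted word. The word-wise XOR compression used here is an assumption chosen for brevity, not necessarily the paper's characteristic.

class RefreshCheckedDRAM:
    # Sketch only: a reference characteristic maintained on writes and
    # rechecked during every refresh sweep of the embedded DRAM model.
    def __init__(self, rows, row_words):
        self.mem = [[0] * row_words for _ in range(rows)]
        self.reference = 0

    def write(self, row, col, value):
        # Legitimate write: keep the precomputed reference consistent.
        self.reference ^= self.mem[row][col] ^ value
        self.mem[row][col] = value

    def refresh_sweep(self):
        # Refresh all rows; reuse the sweep to recompute the characteristic.
        observed = 0
        for row in self.mem:          # each row access models one refresh cycle
            for word in row:
                observed ^= word
        return observed == self.reference   # False => error detected

if __name__ == "__main__":
    dram = RefreshCheckedDRAM(rows=4, row_words=8)
    dram.write(1, 3, 0xBEEF)
    assert dram.refresh_sweep()          # consistent after normal operation
    dram.mem[2][5] ^= 0x0400             # transient bit flip, no READ involved
    print("error detected:", not dram.refresh_sweep())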
71. Symmetric Transparent BIST for RAMs
Yarmolik, V.N., Hellebrand, S. and Wunderlich, H.-J.
Proceedings of the 2nd Conference on Design, Automation and Test in Europe (DATE'99), Munich, Germany, 9-12 March 1999, pp. 702-707
1999
DOI URL PDF 
Abstract: The paper introduces the new concept of symmetric transparent BIST for RAMs. This concept makes it possible to skip the signature prediction phase of conventional transparent BIST approaches and therefore yields a significant reduction of test time. The hardware cost and the fault coverage of the new scheme remain comparable to that of a traditional transparent BIST scheme. In many cases, experimental studies even show a higher fault coverage obtained in shorter test time.
BibTeX:
@inproceedings{YarmoHW1999,
  author = {Yarmolik, Vyacheslav N. and Hellebrand, Sybille and Wunderlich, Hans-Joachim},
  title = {{Symmetric Transparent BIST for RAMs}},
  booktitle = {Proceedings of the 2nd Conference on Design, Automation and Test in Europe (DATE'99)},
  publisher = {IEEE Computer Society},
  year = {1999},
  pages = {702--707},
  abstract = {The paper introduces the new concept of symmetric transparent BIST for RAMs. This concept makes it possible to skip the signature prediction phase of conventional transparent BIST approaches and therefore yields a significant reduction of test time. The hardware cost and the fault coverage of the new scheme remain comparable to that of a traditional transparent BIST scheme. In many cases, experimental studies even show a higher fault coverage obtained in shorter test time.},
  url = {http://www.computer.org/csdl/proceedings/date/1999/0078/00/00780702-abs.html},
  doi = {http://dx.doi.org/10.1109/DATE.1999.761206},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/1999/DATE_YarmoHW1999.pdf}
}
70. Deterministic BIST with Multiple Scan Chains
Kiefer, G. and Wunderlich, H.-J.
Journal of Electronic Testing: Theory and Applications (JETTA)
Vol. 14(1-2), February 1999, pp. 85-93
1999
DOI URL PDF 
Keywords: deterministic scan-based BIST; multiple scan paths; parallel scan
Abstract: A deterministic BIST scheme for circuits with multiple scan paths is presented. A procedure is described for synthesizing a pattern generator which stimulates all scan chains simultaneously and guarantees complete fault coverage.
The new scheme may require less chip area than a classical LFSR-based approach while better or even complete fault coverage is obtained at the same time.
BibTeX:
@article{KiefeW1999,
  author = {Kiefer, Gundolf and Wunderlich, Hans-Joachim},
  title = {{Deterministic BIST with Multiple Scan Chains}},
  journal = {Journal of Electronic Testing: Theory and Applications (JETTA)},
  publisher = {Springer-Verlag},
  year = {1999},
  volume = {14},
  number = {1-2},
  pages = {85--93},
  keywords = {deterministic scan-based BIST; multiple scan paths; parallel scan},
  abstract = {A deterministic BIST scheme for circuits with multiple scan paths is presented. A procedure is described for synthesizing a pattern generator which stimulates all scan chains simultaneously and guarantees complete fault coverage.
The new scheme may require less chip area than a classical LFSR-based approach while better or even complete fault coverage is obtained at the same time.},
  url = {http://dl.acm.org/citation.cfm?id=309342.309357},
  doi = {http://dx.doi.org/10.1023/A:1008353423305},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/1999/JETTA_KiefeW1999.pdf}
}
69. Special ATPG to Correlate Test Patterns for Low-Overhead Mixed-Mode BIST
Karkala, M., Touba, N.A. and Wunderlich, H.-J.
Proceedings of the 7th Asian Test Symposium (ATS'98), Singapore, 2-4 December 1998, pp. 492-499
1998
DOI URL PDF 
Abstract: In mixed-mode BIST, deterministic test patterns are generated with on-chip hardware to detect the random-pattern-resistant (r.p.r.) faults that are missed by the pseudo-random patterns. While previous work in mixed-mode BIST has focussed on developing hardware schemes for more efficiently encoding a given set of deterministic patterns (generated by a conventional ATPG procedure), the approach taken in this paper is to improve the encoding efficiency (and hence reduce hardware overhead) by specially selecting a set of deterministic patterns for the r.p.r. faults that can be efficiently encoded. A special ATPG procedure is described for finding test patterns for the r.p.r. faults that are correlated (have the same logic value) in many bit positions. Such test patterns can be efficiently encoded with one of the many ``bit-fixing'' schemes that have been described in the literature. Results are shown for different bit-fixing schemes which indicate dramatic reductions in BIST overhead can be achieved by using the proposed ATPG procedure to select which test patterns to encode.
BibTeX:
@inproceedings{KarkaTW1998,
  author = {Karkala, Madhavi and Touba, Nur A. and Wunderlich, Hans-Joachim},
  title = {{Special ATPG to Correlate Test Patterns for Low-Overhead Mixed-Mode BIST}},
  booktitle = {Proceedings of the 7th Asian Test Symposium (ATS'98)},
  publisher = {IEEE Computer Society},
  year = {1998},
  pages = {492--499},
  abstract = {In mixed-mode BIST, deterministic test patterns are generated with on-chip hardware to detect the random-pattern-resistant (r.p.r.) faults that are missed by the pseudo-random patterns. While previous work in mixed-mode BIST has focussed on developing hardware schemes for more efficiently encoding a given set of deterministic patterns (generated by a conventional ATPG procedure), the approach taken in this paper is to improve the encoding efficiency (and hence reduce hardware overhead) by specially selecting a set of deterministic patterns for the r.p.r. faults that can be efficiently encoded. A special ATPG procedure is described for finding test patterns for the r.p.r. faults that are correlated (have the same logic value) in many bit positions. Such test patterns can be efficiently encoded with one of the many ``bit-fixing'' schemes that have been described in the literature. Results are shown for different bit-fixing schemes which indicate dramatic reductions in BIST overhead can be achieved by using the proposed ATPG procedure to select which test patterns to encode.},
  url = {http://www.computer.org/csdl/proceedings/ats/1998/8277/00/82770492-abs.html},
  doi = {http://dx.doi.org/10.1109/ATS.1998.741662},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/1998/ATS_KarkaTW1998.pdf}
}
68. BIST for Systems-on-a-Chip
Wunderlich, H.-J.
Integration, the VLSI Journal - Special issue on VLSI testing
Vol. 26(1-2), December 1998, pp. 55-78
1998
DOI URL PDF 
Keywords: BIST; Systems-on-chip; Deterministic BIST; Functional BIST
Abstract: An increasing part of microelectronic systems is implemented on the basis of predesigned and preverified modules, so-called cores, which are reused in many instances. Core-providers offer RISC-kernels, embedded memories, DSPs, and many other functions, and built-in self-test is the appropriate method for testing complex systems composed of different cores. In this paper, we overview BIST methods for different types of cores and present advanced BIST solutions. Special emphasis is put on deterministic BIST methods as they do not require any modifications of the core under test and help to protect intellectual property (IP).
BibTeX:
@article{Wunde1998,
  author = {Wunderlich, Hans-Joachim},
  title = {{BIST for Systems-on-a-Chip}},
  journal = {Integration, the VLSI Journal - Special issue on VLSI testing},
  publisher = {Elsevier Science Publishers B. V.},
  year = {1998},
  volume = {26},
  number = {1-2},
  pages = {55--78},
  keywords = {BIST; Systems-on-chip; Deterministic BIST; Functional BIST},
  abstract = {An increasing part of microelectronic systems is implemented on the basis of predesigned and preverified modules, so-called cores, which are reused in many instances. Core-providers offer RISC-kernels, embedded memories, DSPs, and many other functions, and built-in self-test is the appropriate method for testing complex systems composed of different cores. In this paper, we overview BIST methods for different types of cores and present advanced BIST solutions. Special emphasis is put on deterministic BIST methods as they do not require any modifications of the core under test and help to protect intellectual property (IP).},
  url = {http://dl.acm.org/citation.cfm?id=306217.306222},
  doi = {http://dx.doi.org/10.1016/S0167-9260(98)00021-2},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/1998/Wunde1998.pdf}
}
67. Accumulator Based Deterministic BIST
Dorsch, R. and Wunderlich, H.-J.
Proceedings of the 29th IEEE International Test Conference (ITC'98), Washington, DC, USA, 18-23 October 1998, pp. 412-421
1998
DOI URL PDF 
Keywords: BIST; hardware pattern generator; embedded cores
Abstract: Most built-in self test (BIST) solutions require specialized test pattern generation hardware which may introduce significant area overhead and performance degradation. Recently, some authors proposed test pattern generation on chip by means of functional units also used in system mode like adders or multipliers. These schemes generate pseudo-random or pseudo-exhaustive patterns for serial or parallel BIST. If the circuit under test contains random pattern resistant faults a deterministic test pattern generator is necessary to obtain complete fault coverage.
In this paper it is shown that a deterministic test set can be encoded as initial values of an accumulator based structure, and all testable faults can be detected within a given test length by carefully selecting the seeds of the accumulator. A ROM is added for storing the seeds, and the control logic of the accumulator is modified. In most cases the size of the ROM is less than the size required by traditional LFSR-based reseeding approaches.
BibTeX:
@inproceedings{DorscW1998,
  author = {Dorsch, Rainer and Wunderlich, Hans-Joachim},
  title = {{Accumulator Based Deterministic BIST}},
  booktitle = {Proceedings of the 29th IEEE International Test Conference (ITC'98)},
  publisher = {IEEE Computer Society},
  year = {1998},
  pages = {412--421},
  keywords = {BIST; hardware pattern generator; embedded cores},
  abstract = {Most built-in self test (BIST) solutions require specialized test pattern generation hardware which may introduce significant area overhead and performance degradation. Recently, some authors proposed test pattern generation on chip by means of functional units also used in system mode like adders or multipliers. These schemes generate pseudo-random or pseudo-exhaustive patterns for serial or parallel BIST. If the circuit under test contains random pattern resistant faults a deterministic test pattern generator is necessary to obtain complete fault coverage.
In this paper it is shown that a deterministic test set can be encoded as initial values of an accumulator based structure, and all testable faults can be detected within a given test length by carefully selecting the seeds of the accumulator. A ROM is added for storing the seeds, and the control logic of the accumulator is modified. In most cases the size of the ROM is less than the size required by traditional LFSR-based reseeding approaches.},
  url = {http://www.computer.org/csdl/proceedings/itc/1998/5093/00/50930412-abs.html},
  doi = {http://dx.doi.org/10.1109/TEST.1998.743181},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/1998/ITC_DorscW1998.pdf}
}
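A minimal sketch of the accumulator-based generation idea from the entry above: an accumulator repeatedly adds a constant increment to a seed taken from a small ROM, and the seed is chosen so that a given deterministic test cube appears within the allotted test length. The 8-bit width, the increment value, and the cube encoding are illustrative assumptions; the paper's control-logic modifications and seed-selection procedure are not reproduced here.

def accumulator_patterns(seed, increment, width, length):
    # Pattern source reused from the data path: an accumulator of `width` bits
    # that starts at `seed` and repeatedly adds `increment` (modulo 2**width).
    # Each intermediate sum serves as one test pattern.
    mask = (1 << width) - 1
    acc = seed & mask
    for _ in range(length):
        yield acc
        acc = (acc + increment) & mask

def covers(seed, cube, increment=0x3B, width=8, length=32):
    # Does the sequence started from `seed` hit the deterministic cube?
    # A cube is given as (care_mask, care_value): bits set in care_mask
    # must equal the corresponding bits of care_value.
    care_mask, care_value = cube
    return any((p & care_mask) == care_value
               for p in accumulator_patterns(seed, increment, width, length))

if __name__ == "__main__":
    # Hypothetical 8-bit cube: bits 7..4 must be 1010, lower bits are don't care.
    cube = (0xF0, 0xA0)
    # The seeds stored in the on-chip ROM would be chosen so that the cube is hit.
    print([s for s in range(256) if covers(s, cube)][:5])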
66. Deterministic BIST with Multiple Scan Chains
Kiefer, G. and Wunderlich, H.-J.
Proceedings of the 29th IEEE International Test Conference (ITC'98), Washington, DC, USA, 18-23 October 1998, pp. 1057-1064
1998
DOI URL PDF 
Keywords: deterministic scan-based BIST; multiple scan paths; parallel scan
Abstract: A deterministic BIST scheme for circuits with multiple scan paths is presented. A procedure is described for synthesizing a pattern generator which stimulates all scan chains simultaneously and guarantees complete fault coverage.
The new scheme may require less chip area than a classical LFSR-based approach while better or even complete fault coverage is obtained at the same time.
BibTeX:
@inproceedings{KiefeW1998,
  author = {Kiefer, Gundolf and Wunderlich, Hans-Joachim},
  title = {{Deterministic BIST with Multiple Scan Chains}},
  booktitle = {Proceedings of the 29th IEEE International Test Conference (ITC'98)},
  publisher = {IEEE Computer Society},
  year = {1998},
  pages = {1057--1064},
  keywords = {deterministic scan-based BIST; multiple scan paths; parallel scan},
  abstract = {A deterministic BIST scheme for circuits with multiple scan paths is presented. A procedure is described for synthesizing a pattern generator which stimulates all scan chains simultaneously and guarantees complete fault coverage.
The new scheme may require less chip area than a classical LFSR-based approach while better or even complete fault coverage is obtained at the same time.},
  url = {http://www.computer.org/csdl/proceedings/itc/1998/5093/00/50931057-abs.html},
  doi = {http://dx.doi.org/10.1109/TEST.1998.743304},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/1998/ITC_KiefeW1998.pdf}
}
65. Synthesizing Fast, Online-Testable Control Units
Hellebrand, S., Wunderlich, H.-J. and Hertwig, A.
IEEE Design & Test of Computers
Vol. 15(4), October-December 1998, pp. 36-41
1998
DOI URL PDF 
Abstract: The authors present the self-checking bypass pipeline, an online-testable controller structure for data-dominated applications. For most circuits in a standard benchmark set, this structure leads to a performance improvement of more than 30% with an area overhead less than 15% that of conventional online-testable finite-state machines.
BibTeX:
@article{HelleWH1998a,
  author = {Hellebrand, Sybille and Wunderlich, Hans-Joachim and Hertwig, Andre},
  title = {{Synthesizing Fast, Online-Testable Control Units}},
  journal = {IEEE Design & Test of Computers},
  publisher = {IEEE Computer Society},
  year = {1998},
  volume = {15},
  number = {4},
  pages = {36--41},
  abstract = {The authors present the self-checking bypass pipeline, an online-testable controller structure for data-dominated applications. For most circuits in a standard benchmark set, this structure leads to a performance improvement of more than 30% with an area overhead less than 15% that of conventional online-testable finite-state machines.},
  url = {http://dl.acm.org/citation.cfm?id=622192.622859},
  doi = {http://dx.doi.org/10.1109/54.735925},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/1998/D&T_HelleWH1998a.pdf}
}
64. New Transparent RAM BIST Based on Self-Adjusting Output Data Compression
Yarmolik, V.N., Klimets, Y., Hellebrand, S. and Wunderlich, H.-J.
Proceedings of the Design and Diagnostics of Electronic Circuits and Systems (DDECS'98), Szczyrk, Poland, 02-04 September 1998, pp. 27-33
1998
URL PDF 
Abstract: A new transparent memory BIST technique is proposed in this paper. It achieves higher fault coverage than the classical transparent technique and reduces the test complexity by up to 50% for most march tests.
BibTeX:
@inproceedings{YarmoKHW1998,
  author = {Yarmolik, Vyacheslav N. and Klimets, Yuri and Hellebrand, Sybille and Wunderlich, Hans-Joachim},
  title = {{New Transparent RAM BIST Based on Self-Adjusting Output Data Compression}},
  booktitle = {Proceedings of the Design and Diagnostics of Electronic Circuits and Systems (DDECS'98)},
  publisher = {Gliwice: Silesian Techn. Univ. Press},
  year = {1998},
  pages = {27--33},
  abstract = {A new transparent memory BIST technique is proposed in this paper. It achieves higher fault coverage than the classical transparent technique and reduces the test complexity by up to 50% for most march tests.},
  url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-1998-39&engl=0},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/1998/DDECS_YarmoKHW1998.pdf}
}
63. Hardware-Optimal Test Register Insertion
Stroele, A.P. and Wunderlich, H.-J.
IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems (TCAD)
Vol. 17(6), June 1998, pp. 531-539
1998
DOI PDF 
Keywords: BILBO; built-in self-test; CBILBO; test register insertion
Abstract: Implementing a built-in self-test by a ``test per clock'' scheme offers advantages concerning fault coverage, detection of delay faults and test application time. Such a scheme is implemented by test registers, for instance BILBOs or CBILBOs, which are inserted into the circuit structure at appropriate places. An algorithm is presented which is able to find the cost optimal placement of test registers for nearly all the ISCAS'89 sequential benchmark circuits, and a suboptimal solution with slightly higher costs is obtained for all the circuits within a few minutes of computing time. The algorithm can also be applied to the Minimum Feedback Vertex Set problem in partial scan design, and an optimal solution is found for all the benchmark circuits.
The provably optimal solutions for the benchmark circuits mainly use CBILBOs which can simultaneously generate test patterns and compact test responses. Hence, test scheduling is not required, test control is simplified, and test application time is reduced.
BibTeX:
@article{StroeW1998,
  author = {Stroele, Albrecht P. and Wunderlich, Hans-Joachim},
  title = {{Hardware-Optimal Test Register Insertion}},
  journal = {IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems (TCAD)},
  year = {1998},
  volume = {17},
  number = {6},
  pages = {531--539},
  keywords = {BILBO; built-in self-test; CBILBO; test register insertion},
  abstract = {Implementing a built-in self-test by a ``test per clock'' scheme offers advantages concerning fault coverage, detection of delay faults and test application time. Such a scheme is implemented by test registers, for instance BILBOs or CBILBOs, which are inserted into the circuit structure at appropriate places. An algorithm is presented which is able to find the cost optimal placement of test registers for nearly all the ISCAS'89 sequential benchmark circuits, and a suboptimal solution with slightly higher costs is obtained for all the circuits within a few minutes of computing time. The algorithm can also be applied to the Minimum Feedback Vertex Set problem in partial scan design, and an optimal solution is found for all the benchmark circuits.
The provably optimal solutions for the benchmark circuits mainly use CBILBOs which can simultaneously generate test patterns and compact test responses. Hence, test scheduling is not required, test control is simplified, and test application time is reduced.},
  doi = {http://dx.doi.org/10.1109/43.703833},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/1998/TCAD_StroeW1998.pdf}
}
62. Fast Self-Recovering Controllers
Hertwig, A., Hellebrand, S. and Wunderlich, H.-J.
Proceedings of the 16th IEEE VLSI Test Symposium (VTS'98), Monterey, California, USA, 26-30 April 1998, pp. 296-302
1998
DOI URL PDF 
Keywords: FSM synthesis; fault-tolerance; checkpointing; performance-driven synthesis
Abstract: A fast fault-tolerant controller structure is presented, which is capable of recovering from transient faults by performing a rollback operation in hardware.
The proposed fault-tolerant controller structure utilizes the rollback hardware also for system mode and this way achieves performance improvements of more than 50% compared to controller structures made fault-tolerant by conventional techniques, while the hardware overhead is often negligible. The proposed approach is compatible with state-of-the-art methods for FSM decomposition, state encoding and logic synthesis.
BibTeX:
@inproceedings{HertwHW1998,
  author = {Hertwig, Andre and Hellebrand, Sybille and Wunderlich, Hans-Joachim},
  title = {{Fast Self-Recovering Controllers}},
  booktitle = {Proceedings of the 16th IEEE VLSI Test Symposium (VTS'98)},
  publisher = {IEEE Computer Society},
  year = {1998},
  pages = {296--302},
  keywords = {FSM synthesis; fault-tolerance; checkpointing; performance-driven synthesis},
  abstract = {A fast fault-tolerant controller structure is presented, which is capable of recovering from transient faults by performing a rollback operation in hardware.
The proposed fault-tolerant controller structure utilizes the rollback hardware also for system mode and this way achieves performance improvements of more than 50% compared to controller structures made fault-tolerant by conventional techniques, while the hardware overhead is often negligible. The proposed approach is compatible with state-of-the-art methods for FSM decomposition, state encoding and logic synthesis.},
  url = {http://www.computer.org/csdl/proceedings/vts/1998/8436/00/84360296-abs.html},
  doi = {http://dx.doi.org/10.1109/VTEST.1998.670883},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/1998/VTS_HertwHW1998.pdf}
}
61. Self-Adjusting Output Data Compression: An Efficient BIST Technique for RAMs
Yarmolik, V.N., Hellebrand, S. and Wunderlich, H.-J.
Proceedings of the 1st Conference on Design, Automation and Test in Europe (DATE'98), Paris, France, 23-26 February 1998, pp. 173-179
1998
DOI URL PDF 
Keywords: built-in self test; data compression; integrated circuit testing; random-access storage
Abstract: After write operations, BIST schemes for RAMs relying on signature analysis must compress the entire memory contents to update the reference signature. This paper introduces a new scheme for output data compression which avoids this overhead while retaining the benefits of signature analysis. The proposed technique is based on a new memory characteristic derived as the modulo-2 sum of all addresses pointing to non-zero cells. This characteristic can be adjusted concurrently with write operations by simple EXOR-operations on the initial characteristic and on the addresses affected by the change.
BibTeX:
@inproceedings{YarmoHW1998,
  author = {Yarmolik, Vyacheslav N. and Hellebrand, Sybille and Wunderlich, Hans-Joachim},
  title = {{Self-Adjusting Output Data Compression: An Efficient BIST Technique for RAMs}},
  booktitle = {Proceedings of the 1st Conference on Design, Automation and Test in Europe (DATE'98)},
  publisher = {IEEE Computer Society},
  year = {1998},
  pages = {173--179},
  keywords = {built-in self test; data compression; integrated circuit testing; random-access storage},
  abstract = {After write operations, BIST schemes for RAMs relying on signature analysis must compress the entire memory contents to update the reference signature. This paper introduces a new scheme for output data compression which avoids this overhead while retaining the benefits of signature analysis. The proposed technique is based on a new memory characteristic derived as the modulo-2 sum of all addresses pointing to non-zero cells. This characteristic can be adjusted concurrently with write operations by simple EXOR-operations on the initial characteristic and on the addresses affected by the change.},
  url = {http://www.computer.org/csdl/proceedings/date/1998/8359/00/83590173-abs.html},
  doi = {http://dx.doi.org/10.1109/DATE.1998.655853},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/1998/DATE_YarmoHW1998.pdf}
}
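The self-adjusting characteristic from the entry above, the modulo-2 sum (bitwise XOR) of all addresses pointing to non-zero cells, can be sketched directly: a write only needs a single XOR on the stored characteristic whenever a cell toggles between zero and non-zero content, so no full signature recomputation is required after write operations. The class interface below is an illustration, not the BIST hardware itself; word width and memory size are arbitrary assumptions.

class SelfAdjustingRAM:
    # Sketch of the memory characteristic described in the abstract: the
    # bitwise XOR of all addresses whose cells hold a non-zero word.
    def __init__(self, size):
        self.mem = [0] * size
        self.characteristic = 0   # XOR of addresses of all non-zero cells

    def write(self, addr, value):
        # Adjust the characteristic concurrently with the write: the address
        # enters or leaves the XOR sum whenever the cell toggles between
        # zero and non-zero content.
        was_nonzero, is_nonzero = self.mem[addr] != 0, value != 0
        if was_nonzero != is_nonzero:
            self.characteristic ^= addr
        self.mem[addr] = value

    def recompute(self):
        # Reference computed from scratch, e.g. during a test pass.
        c = 0
        for addr, value in enumerate(self.mem):
            if value != 0:
                c ^= addr
        return c

if __name__ == "__main__":
    ram = SelfAdjustingRAM(1024)
    for addr, val in [(3, 7), (100, 0xFF), (3, 0), (511, 1)]:
        ram.write(addr, val)
    # Concurrently adjusted and freshly recomputed characteristics agree.
    assert ram.characteristic == ram.recompute()
    print(hex(ram.characteristic))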
60. Mixed-Mode BIST Using Embedded Processors
Hellebrand, S., Wunderlich, H.-J. and Hertwig, A.
Journal of Electronic Testing: Theory and Applications (JETTA)
Vol. 12(1-2), February 1998, pp. 127-138
1998
DOI URL PDF 
Keywords: BIST; random pattern testing; deterministic BIST; embedded systems
Abstract: In complex systems, embedded processors may be used to run software for test pattern generation and response evaluation. For system components which are not completely random pattern testable, the test programs have to generate deterministic patterns after random testing. Usually the random test part of the program requires long run times whereas the part for deterministic testing has high memory requirements.
In this paper it is shown that an appropriate selection of the random pattern test method can significantly reduce the memory requirements of the deterministic part. A new, highly efficient scheme for software-based random pattern testing is proposed, and it is shown how to extend the scheme for deterministic test pattern generation. The entire test scheme may also be used for implementing a scan based BIST in hardware.
BibTeX:
@article{HelleWH1998,
  author = {Hellebrand, Sybille and Wunderlich, Hans-Joachim and Hertwig, Andre},
  title = {{Mixed-Mode BIST Using Embedded Processors}},
  journal = {Journal of Electronic Testing: Theory and Applications (JETTA)},
  publisher = {Springer-Verlag},
  year = {1998},
  volume = {12},
  number = {1-2},
  pages = {127--138},
  keywords = {BIST; random pattern testing; deterministic BIST; embedded systems},
  abstract = {In complex systems, embedded processors may be used to run software for test pattern generation and response evaluation. For system components which are not completely random pattern testable, the test programs have to generate deterministic patterns after random testing. Usually the random test part of the program requires long run times whereas the part for deterministic testing has high memory requirements.
In this paper it is shown that an appropriate selection of the random pattern test method can significantly reduce the memory requirements of the deterministic part. A new, highly efficient scheme for software-based random pattern testing is proposed, and it is shown how to extend the scheme for deterministic test pattern generation. The entire test scheme may also be used for implementing a scan based BIST in hardware.},
  url = {http://dl.acm.org/citation.cfm?id=290368.290382},
  doi = {http://dx.doi.org/10.1023/A:1008294125692},
  file = {http://www.iti.uni-stuttgart.de//fileadmin/rami/files/publications/1998/JETTA_HelleWH1998.pdf}
}
59. Using BIST Control for Pattern Generation
Kiefer, G. and Wunderlich, H.-J.
Proceedings of the 28th IEEE International Test Conference (ITC'97), Washington, DC, USA, 1-6 November 1997, pp. 347-355
1997
DOI URL PDF 
Keywords: deterministic BIST; scan-based BIST
Abstract: A deterministic BIST scheme is presented which requires less hardware overhead than pseudo-random BIST but obtains better or even complete fault coverage at the same time. It takes advantage of the fact that any autonomous BIST scheme needs a BIST control unit for indicating the completion of the self-test at least.
Hence, pattern counters and bit counters are always available, and they provide information to be used for deterministic pattern generation by some additional circuitry. This paper presents a systematic way for synthesizing a pattern generator which needs less area than a 32-bit LFSR for random pattern generation for all the benchmark circuits.
BibTeX:
@inproceedings{KiefeW1997,
  author = {Kiefer, Gundolf and Wunderlich, Hans-Joachim},
  title = {{Using BIST Control for Pattern Generation}},
  booktitle = {Proceedings of the 28th IEEE International Test Conference (ITC'97)},
  publisher = {IEEE Computer Society},
  year = {1997},
  pages = {347--355},
  keywords = {deterministic BIST; scan-based BIST},
  abstract = {A deterministic BIST scheme is presented which requires less hardware overhead than pseudo-random BIST but obtains better or even complete fault coverage at the same time. It takes advantage of the fact that any autonomous BIST scheme needs a BIST control unit for indicating the completion of the self-test at least.
Hence, pattern counters and bit counters are always available, and they provide information to be used for deterministic pattern generation by some additional circuitry. This paper presents a systematic way for synthesizing a pattern generator which needs less area than a 32-bit LFSR for random pattern generation for all the benchmark circuits.},
  url = {http://www.computer.org/csdl/proceedings/itc/1997/4210/00/42100347-abs.html},
  doi = {http://dx.doi.org/10.1109/TEST.1997.639636},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/1997/ITC_KiefeW1997.pdf}
}
58. STARBIST: Scan Autocorrelated Random Pattern Generation
Tsai, K.-H., Hellebrand, S., Rajski, J. and Marek-Sadowska, M.
Proceedings of the 34th ACM/IEEE Design Automation Conference (DAC'97), Anaheim, California, USA, 9-13 June 1997, pp. 472-477
1997
DOI URL PDF 
Abstract: This paper presents a new scan-based BIST scheme which achieves very high fault coverage without the deficiencies of previously proposed schemes. This approach utilizes scan order
and polarity in scan synthesis, effectively converting the scan chain into a ROM capable of storing some “center” patterns from which the other vectors are derived by randomly complementing some of their coordinates. Experimental results demonstrate that a very high fault coverage can be obtained without any modification of the mission logic, no test data to store and very simple BIST hardware which does not depend on the size of the circuit.
BibTeX:
@inproceedings{TsaiHRM1997,
  author = {Tsai, Kun-Han and Hellebrand, Sybille and Rajski, Janusz and Marek-Sadowska, Malgorzata},
  title = {{STARBIST: Scan Autocorrelated Random Pattern Generation}},
  booktitle = {Proceedings of the 34th ACM/IEEE Design Automation Conference (DAC'97)},
  publisher = {IEEE Computer Society},
  year = {1997},
  pages = {472--477},
  abstract = {This paper presents a new scan-based BIST scheme which achieves very high fault coverage without the deficiencies of previously proposed schemes. This approach utilizes scan order
and polarity in scan synthesis, effectively converting the scan chain into a ROM capable of storing some “center” patterns from which the other vectors are derived by randomly complementing some of their coordinates. Experimental results demonstrate that a very high fault coverage can be obtained without any modification of the mission logic, no test data to store and very simple BIST hardware which does not depend on the size of the circuit.},
  url = {http://www.computer.org/csdl/proceedings/dac/1997/2477/00/24770472-abs.html},
  doi = {http://dx.doi.org/10.1109/DAC.1997.597194},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/1997/DAC_TsaiHRM1997.pdf}
}
57. Fast Controllers for Data Dominated Applications
Hertwig, A. and Wunderlich, H.-J.
Proceedings of the European Design & Test Conference (ED&TC'97), Paris, France, 17-20 March 1997, pp. 84-89
1997
DOI URL PDF 
Keywords: FSM synthesis; performance driven synthesis; synthesis of testable controllers
Abstract: A target structure for implementing fast edge-triggered control units is presented. In many cases, the proposed controller is faster than a one-hot encoded structure as its correct timing does not require master-slave flip-flops even in the presence of unpredictable clocking skews.
A synthesis procedure is proposed which leads to a performance improvement of 40% on average for the standard benchmark set whereas the additional area is less than 25% compared with conventional finite state machine (FSM) synthesis. The proposed approach is compatible with the state-of-the-art methods for FSM decomposition, state encoding and logic synthesis.
BibTeX:
@inproceedings{HertwW1997,
  author = {Hertwig, Andre and Wunderlich, Hans-Joachim},
  title = {{Fast Controllers for Data Dominated Applications}},
  booktitle = {Proceedings of the European Design & Test Conference (ED&TC'97)},
  publisher = {IEEE Computer Society},
  year = {1997},
  pages = {84--89},
  keywords = {FSM synthesis; performance driven synthesis; synthesis of testable controllers},
  abstract = {A target structure for implementing fast edge-triggered control units is presented. In many cases, the proposed controller is faster than a one-hot encoded structure as its correct timing does not require master-slave flip-flops even in the presence of unpredictable clocking skews. 
A synthesis procedure is proposed which leads to a performance improvement of 40% on average for the standard benchmark set whereas the additional area is less than 25% compared with conventional finite state machine (FSM) synthesis. The proposed approach is compatible with the state-of-the-art methods for FSM decomposition, state encoding and logic synthesis.},
  url = {http://dl.acm.org/citation.cfm?id=787260.787641},
  doi = {http://dx.doi.org/10.1109/EDTC.1997.582337},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/1997/ED&TC_HertwW1997.pdf}
}
56. Bit-Flipping BIST
Wunderlich, H.-J. and Kiefer, G.
Proceedings of the ACM/IEEE International Conference on Computer-Aided Design (ICCAD'96), San Jose, California, USA, 10-14 November 1996, pp. 337-343
1996
DOI URL PDF 
Keywords: Mixed-Mode BIST
Abstract: A scan-based BIST scheme is presented which guarantees complete fault coverage with very low hardware overhead. A probabilistic analysis shows that the output of an LFSR which feeds a scan path has to be modified only at a few bits in order to transform the random patterns into a complete test set. These modifications may be implemented by a bit-flipping function which has the LFSR-state as an input, and flips the value shifted into the scan path at certain times. A procedure is described for synthesizing the additional bit-flipping circuitry, and the experimental results indicate that this mixed-mode BIST scheme requires less hardware for complete fault coverage than all the other scan-based BIST approaches published so far.
BibTeX:
@inproceedings{WundeK1996,
  author = {Wunderlich, Hans-Joachim and Kiefer, Gundolf},
  title = {{Bit-Flipping BIST}},
  booktitle = {Proceedings of the ACM/IEEE International Conference on Computer-Aided Design (ICCAD'96)},
  publisher = {IEEE Computer Society},
  year = {1996},
  pages = {337--343},
  keywords = {Mixed-Mode BIST},
  abstract = {A scan-based BIST scheme is presented which guarantees complete fault coverage with very low hardware overhead. A probabilistic analysis shows that the output of an LFSR which feeds a scan path has to be modified only at a few bits in order to transform the random patterns into a complete test set. These modifications may be implemented by a bit-flipping function which has the LFSR-state as an input, and flips the value shifted into the scan path at certain times. A procedure is described for synthesizing the additional bit-flipping circuitry, and the experimental results indicate that this mixed-mode BIST scheme requires less hardware for complete fault coverage than all the other scan-based BIST approaches published so far.},
  url = {http://www.computer.org/csdl/proceedings/iccad/1996/7597/00/75970337-abs.html},
  doi = {http://dx.doi.org/10.1109/ICCAD.1996.569803},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/1996/ICCAD_WundeK1996.pdf}
}
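A toy rendering of the bit-flipping idea from the entry above: an LFSR feeds the scan chain, and a flip function that observes the LFSR state inverts the bit shifted into the scan path at a few selected times, turning a pseudo-random sequence into one that also contains deterministic patterns. In the paper the flip function is synthesized as logic from the deterministic test set; the hand-picked flip states, LFSR feedback taps, and chain length below are illustrative assumptions only.

def lfsr_stream(seed, taps, width, n_bits):
    # Fibonacci-style LFSR: the feedback bit is the XOR of the tapped stages.
    # `taps` are bit positions (0 = LSB); the serial output is the LSB.
    state = seed
    for _ in range(n_bits):
        yield state          # full state, so the flip logic can observe it
        fb = 0
        for t in taps:
            fb ^= (state >> t) & 1
        state = (state >> 1) | (fb << (width - 1))

def scan_pattern(seed, flip_states, width=8, taps=(0, 2, 3, 4), chain_len=16):
    # The bit shifted into the scan chain is the LFSR's LSB, inverted whenever
    # the current LFSR state lies in `flip_states`.
    bits = []
    for state in lfsr_stream(seed, taps, width, chain_len):
        bit = state & 1
        if state in flip_states:
            bit ^= 1          # flip the value shifted into the scan path
        bits.append(bit)
    return bits

if __name__ == "__main__":
    print(scan_pattern(seed=0x5A, flip_states=set()))         # pure pseudo-random
    print(scan_pattern(seed=0x5A, flip_states={0x5A, 0x2D}))  # a few bits flipped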
55. Mixed-Mode BIST Using Embedded Processors
Hellebrand, S., Wunderlich, H.-J. and Hertwig, A.
Proceedings of the 27th IEEE International Test Conference (ITC'96), Washington, DC, USA, 20-25 October 1996, pp. 195-204
1996
DOI URL PDF 
Abstract: In complex systems, embedded processors may be used to run software routines for test pattern generation and response evaluation. For system components which are not completely random pattern testable, the test programs have to generate deterministic patterns after random testing. Usually the random test part of the program requires long run times whereas the part for deterministic testing has high memory requirements.
In this paper it is shown that an appropriate selection of the random pattern test method can significantly reduce the memory requirements of the deterministic part. A new, highly efficient scheme for software-based random pattern testing is proposed, and it is shown how to extend the scheme for deterministic test pattern generation. The entire test scheme may also be used for implementing a scan based BIST in hardware.
BibTeX:
@inproceedings{HelleWH1996,
  author = {Hellebrand, Sybille and Wunderlich, Hans-Joachim and Hertwig, Andre},
  title = {{Mixed-Mode BIST Using Embedded Processors}},
  booktitle = {Proceedings of the 27th IEEE International Test Conference (ITC'96)},
  publisher = {IEEE Computer Society},
  year = {1996},
  pages = {195--204},
  abstract = {In complex systems, embedded processors may be used to run software routines for test pattern generation and response evaluation. For system components which are not completely random pattern testable, the test programs have to generate deterministic patterns after random testing. Usually the random test part of the program requires long run times whereas the part for deterministic testing has high memory requirements.
In this paper it is shown that an appropriate selection of the random pattern test method can significantly reduce the memory requirements of the deterministic part. A new, highly efficient scheme for software-based random pattern testing is proposed, and it is shown how to extend the scheme for deterministic test pattern generation. The entire test scheme may also be used for implementing a scan based BIST in hardware.},
  url = {http://www.computer.org/csdl/proceedings/itc/1996/2121/00/21210195-abs.html},
  doi = {http://dx.doi.org/10.1109/TEST.1996.556962},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/1996/ITC_HelleWH1996.pdf}
}
54. Deterministic Pattern Generation for Weighted Random Pattern Testing
Reeb, B. and Wunderlich, H.-J.
Proceedings of the European Design & Test Conference (ED&TC'96), Paris, France, 11-14 March 1996, pp. 30-36
1996
DOI URL PDF 
Abstract: Weighted random pattern testing is now widely accepted as a very economic way for external testing as well as for implementing a built-in self-test (BIST) scheme. The weights may be computed either by structural analysis or by extracting the required information from a precomputed deterministic test set. In this paper, we present a method for generating deterministic test patterns which can easily be transformed into weight sets. These test patterns contain only minimal redundant information such that the weight generation process is not biased, and the patterns are grouped such that the conflicts within a group are minimized. The quality of the weight sets obtained this way is superior to the approaches published so far with respect to a small number of weights and weighted patterns, and complete fault coverage for all the ISCAS-85 and ISCAS-89 benchmark circuits.
BibTeX:
@inproceedings{ReebW1996,
  author = {Reeb, Birgit and Wunderlich, Hans-Joachim},
  title = {{Deterministic Pattern Generation for Weighted Random Pattern Testing}},
  booktitle = {Proceedings of the European Design & Test Conference (ED&TC'96)},
  publisher = {IEEE Computer Society},
  year = {1996},
  pages = {30--36},
  abstract = {Weighted random pattern testing is now widely accepted as a very economic way for external testing as well as for implementing a built-in self-test (BIST) scheme. The weights may be computed either by structural analysis or by extracting the required information from a precomputed deterministic test set. In this paper, we present a method for generating deterministic test patterns which can easily be transformed into weight sets. These test patterns contain only minimal redundant information such that the weigth generation process is not biased, and the patterns are grouped such that the conflicts with a group are minimized. The quality of the weight sets obtained this way is superior to the approaches published so far with respect to a small number of weigths and weighted patterns, and a complete fault coverage for all the ISCAS-85 and ISCAS-89 benchmark circuits.},
  url = {http://dl.acm.org/citation.cfm?id=787259.787640},
  doi = {http://dx.doi.org/10.1109/EDTC.1996.494124},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/1996/ED&TC_ReebW1996.pdf}
}
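A minimal illustration of the general idea behind weighted random pattern testing (not taken from the paper above): each primary input is assigned its own probability of being driven to 1, so inputs that need a strong bias to expose hard-to-detect faults are no longer limited to the uniform 0.5 of plain random testing. The circuit size, weights, and function name below are invented for the sketch.

import random

def weighted_patterns(weights, count, seed=42):
    # One pattern per iteration; each input bit is drawn with its own bias.
    rng = random.Random(seed)
    for _ in range(count):
        yield [1 if rng.random() < w else 0 for w in weights]

# Hypothetical 6-input circuit: inputs 0-2 biased towards 1, input 5 towards 0.
weights = [0.9, 0.9, 0.75, 0.5, 0.5, 0.1]
for pattern in weighted_patterns(weights, count=4):
    print(pattern)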
53. Pattern Generation for a Deterministic BIST Scheme
Hellebrand, S., Reeb, B., Tarnick, S. and Wunderlich, H.-J.
Proceedings of the ACM/IEEE International Conference on Computer-Aided Design (ICCAD'95), San Jose, California, USA, 5-9 November 1995, pp. 88-94
1995
DOI URL PDF 
Keywords: BIST; ATPG; Test Synthesis
Abstract: Recently a deterministic built-in self-test scheme has been presented based on reseeding of multiple-polynomial linear feedback shift registers. This scheme encodes deterministic test sets at distinctly lower costs than previously known approaches. In this paper it is shown how this scheme can be supported during test pattern generation. The presented ATPG algorithm generates test sets which can be encoded very efficiently. Experiments show that the area required for synthesizing a BIST scheme that encodes these patterns is significantly less than the area needed for storing a compact test set. Furthermore, it is demonstrated that the proposed approach of combining ATPG and BIST synthesis leads to a considerably reduced hardware overhead compared to encoding a conventionally generated test set.
BibTeX:
@inproceedings{HelleRTW1995,
  author = {Hellebrand, Sybille and Reeb, Birgit and Tarnick, Steffen and Wunderlich, Hans-Joachim},
  title = {{Pattern Generation for a Deterministic BIST Scheme}},
  booktitle = {Proceedings of the ACM/IEEE International Conference on Computer-Aided Design (ICCAD'95)},
  publisher = {IEEE Computer Society},
  year = {1995},
  pages = {88--94},
  keywords = {BIST; ATPG; Test Synthesis},
  abstract = {Recently a deterministic built-in self-test scheme has been presented based on reseeding of multiple-polynomial linear feedback shift registers. This scheme encodes deterministic test sets at distinctly lower costs than previously known approaches. In this paper it is shown how this scheme can be supported during test pattern generation. The presented ATPG algorithm generates test sets which can be encoded very efficiently. Experiments show that the area required for synthesizing a BIST scheme that encodes these patterns is significantly less than the area needed for storing a compact test set. Furthermore, it is demonstrated that the proposed approach of combining ATPG and BIST synthesis leads to a considerably reduced hardware overhead compared to encoding a conventionally generated test set.},
  url = {http://www.computer.org/csdl/proceedings/iccad/1995/7213/00/72130088-abs.html},
  doi = {http://dx.doi.org/10.1109/ICCAD.1995.479997},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/1995/ICCAD_HelleRTW1995.pdf}
}
52. Test Register Insertion with Minimum Hardware Cost
Stroele, A.P. and Wunderlich, H.-J.
Proceedings of the ACM/IEEE International Conference on Computer-Aided Design (ICCAD'95), San Jose, California, USA, 5-9 November 1995, pp. 95-101
1995
DOI URL PDF 
Keywords: Built-in self-test; test register insertion; BILBO; CBILBO
Abstract: Implementing a built-in self-test by a "test per clock" scheme offers advantages concerning fault coverage, detection of delay faults, and test application time. Such a scheme is implemented by test registers, for instance BILBOs and CBILBOs, which are inserted into the circuit structure at appropriate places. An algorithm is presented which is able to find the cost optimal placement of test registers for nearly all the ISCAS'89 sequential benchmark circuits, and a suboptimal solution with slightly higher cost is obtained for all the circuits within a few minutes of computing time. The algorithm can also be applied to the Minimum Feedback Vertex Set problem in partial scan design, and an optimal solution is found for all the benchmark circuits.

The resulting self-testable circuits are analyzed. It is found that often CBILBOs lead to a minimum hardware overhead and also simplify test scheduling and test control.

BibTeX:
@inproceedings{StroeW1995,
  author = {Stroele, Albrecht P. and Wunderlich, Hans-Joachim},
  title = {{Test Register Insertion with Minimum Hardware Cost}},
  booktitle = {Proceedings of the ACM/IEEE International Conference on Computer-Aided Design (ICCAD'95)},
  publisher = {IEEE Computer Society},
  year = {1995},
  pages = {95--101},
  keywords = {Built-in self-test; test register insertion; BILBO; CBILBO},
  abstract = {Implementing a built-in self-test by a "test per clock" scheme offers advantages concerning fault coverage, detection of delay faults, and test application time. Such a scheme is implemented by test registers, for instance BILBOs and CBILBOs, which are inserted into the circuit structure at appropriate places. An algorithm is presented which is able to find the cost optimal placement of test registers for nearly all the ISCAS'89 sequential benchmark circuits, and a suboptimal solution with slightly higher cost is obtained for all the circuits within a few minutes of computing time. The algorithm can also be applied to the Minimum Feedback Vertex Set problem in partial scan design, and an optimal solution is found for all the benchmark circuits.

The resulting self-testable circuits are analyzed. It is found that often CBILBOs lead to a minimum hardware overhead and also simplify test scheduling and test control.},
  url = {http://www.computer.org/csdl/proceedings/iccad/1995/7213/00/72130095-abs.html},
  doi = {http://dx.doi.org/10.1109/ICCAD.1995.479998},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/1995/ICCAD_StroeW1995.pdf}
}

51. Synthesis of Iddq-Testable Circuits: Integrating Built-In Current Sensors
Wunderlich, H.-J., Herzog, M., Figueras, J., Carrasco, J. and Calderón, A.
Proceedings of the European Design & Test Conference (ED&TC'95), Paris, France, 6-9 March 1995, pp. 573-580
1995
DOI PDF 
Abstract: "On-Chip" Iddq testing by the incorporation of Built-In Current (BIC) sensors has some advantages over "off-chip" techniques. However, the integration of sensors poses analog design problems which are hard to be solved by a digital designer. The automatic incorporation of the sensors using parameterized BIC cells could be a promising alternative. The work reported here identifies partitioning criteria to guide the synthesis of Iddq-testable circuits. The circuit must be partitioned, such that the defective Iddq is observable, and the power supply voltage pertubation is within specified limits. In addition to these constraints, also cost criteria are considered: circuit extra delay, area overhead of the BIC sensors, connectivity costs of the test circuitry, and the test application time. The parameters are estimated based on logical as well as electrical level information on the target cell library to be used in the technology mapping phase of the synthesis process. The resulting cost function is optimized by an evolution-based algorithm. When run over large benchmark circuits our method gives significantly superior results to those obtained using simpler and less comprehensive partitioning methods.
BibTeX:
@inproceedings{WundeHFCC1995,
  author = {Wunderlich, Hans-Joachim and Herzog, M. and Figueras, Joan and Carrasco, J.A. and Calderón, A.},
  title = {{Synthesis of Iddq-Testable Circuits: Integrating Built-In Current Sensors}},
  booktitle = {Proceedings of the European Design & Test Conference (ED&TC'95)},
  year = {1995},
  pages = {573--580},
  abstract = {"On-Chip" Iddq testing by the incorporation of Built-In Current (BIC) sensors has some advantages over "off-chip" techniques. However, the integration of sensors poses analog design problems which are hard to be solved by a digital designer. The automatic incorporation of the sensors using parameterized BIC cells could be a promising alternative. The work reported here identifies partitioning criteria to guide the synthesis of Iddq-testable circuits. The circuit must be partitioned, such that the defective Iddq is observable, and the power supply voltage pertubation is within specified limits. In addition to these constraints, also cost criteria are considered: circuit extra delay, area overhead of the BIC sensors, connectivity costs of the test circuitry, and the test application time. The parameters are estimated based on logical as well as electrical level information on the target cell library to be used in the technology mapping phase of the synthesis process. The resulting cost function is optimized by an evolution-based algorithm. When run over large benchmark circuits our method gives significantly superior results to those obtained using simpler and less comprehensive partitioning methods.},
  doi = {http://dx.doi.org/10.1109/EDTC.1995.470342},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/1995/ED&TC_WundeHFCC1995.pdf}
}
50. Built-In Test for Circuits with Scan Based on Reseeding of Multiple-Polynomial Linear Feedback Shift Registers
Hellebrand, S., Rajski, J., Tarnick, S., Venkataraman, S. and Courtois, B.
IEEE Transactions on Computers
Vol. 44(2), February 1995, pp. 223-233
1995
DOI URL PDF 
Keywords: Built-In Test; hardware test pattern generators; input test data compression and decompression; multiple-polynomial LFSR; reseeding; scan design
Abstract: In this paper, we propose a new scheme for Built-In Test (BIT) that uses Multiple-Polynomial Linear Feedback Shift Registers (MP-LFSRs). The same MP-LFSR that generates random patterns to cover easy-to-test faults is loaded with seeds to generate deterministic vectors for difficult-to-test faults. The seeds are obtained by solving systems of linear equations involving the seed variables for the positions where the test cubes have specified values. We demonstrate that MP-LFSRs produce sequences with significantly reduced probability of linear dependence compared to single polynomial LFSRs. We present a general method to determine the probability of encoding as a function of the number of specified bits in the test cube, the length of the LFSR and the number of polynomials. Theoretical analysis and experiments show that the probability of encoding a test cube with s specified bits in an s-stage LFSR with 16 polynomials is 1 - 10^-6. We then present the new BIT scheme that allows for an efficient encoding of the entire test set. Here the seeds are grouped according to the polynomial they use and an implicit polynomial identification reduces the number of extra bits per seed to one bit. The paper also shows methods for processing the entire test set consisting of test cubes with varying numbers of specified bits. Experimental results show the tradeoffs between test data storage and test application time while maintaining complete fault coverage.
BibTeX:
@article{HelleRTVC1995,
  author = {Hellebrand, Sybille and Rajski, Janusz and Tarnick, Steffen and Venkataraman, Srikanth and Courtois, Bernard},
  title = {{Built-In Test for Circuits with Scan Based on Reseeding of Multiple-Polynomial Linear Feedback Shift Registers}},
  journal = {IEEE Transactions on Computers},
  publisher = {IEEE Computer Society},
  year = {1995},
  volume = {44},
  number = {2},
  pages = {223--233},
  keywords = {Built-In Test; hardware test pattern generators; input test data compression and decompression; multiplepolynomial LFSR; reseeding; scan design},
  abstract = {In this paper, we propose a new scheme for Built-In Test (BIT) that uses Multiple-Polynomial Linear Feedback Shift Registers (MP-LFSRs). The same MP-LFSR that generates random patterns to cover easy to test faults is loaded with seeds to generate deterministic vectors for difficult to test faults. The seeds are obtained by solving systems of linear equations involving the seed variables for the positions where the test cubes have specified values. We demonstrate that MP-LFSRs produce sequences with significantly reduced probability of linear dependence compared to single polynomial LFSRs. We present a general method to determine the probability of encoding as a function of the number of specified bits in the test cube, the length of the LFSR and the number of polynomials. Theoretical analysis and experiments show that the probability of encoding a test cube with s specified bits in an s-stage LFSR with 16 polynomials is 1-10^-6. We then present the new BIT scheme that allows for an efficient encoding of the entire test set. Here the seeds are grouped according to the polynomial they use and an implicit polynomial identification reduces the number of extra bits per seed to one bit. The paperalso shows methods of processing the entire test set consisting of test cubes with varied number of specified bits. Experimental results show the tradeoffs between test data storage and test application time while maintaining complete fault coverage.},
  url = {http://dl.acm.org/citation.cfm?id=203279.203285},
  doi = {http://dx.doi.org/10.1109/12.364534},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/1995/TCAD_HelleRTVC1995.pdf}
}
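The seed computation described in the abstract above (seeds obtained by solving linear equations over the specified bits of a test cube) can be illustrated with a few lines of Python. This is only a sketch under simplifying assumptions and not the paper's scheme: a single n-stage Fibonacci LFSR, one GF(2) equation per care bit, and plain Gaussian elimination. The feedback taps and the example cube are invented.

def lfsr_seed_for_cube(taps, n, cube):
    """Return a seed (list of n bits) whose output stream matches the care bits
    in cube = {output position: required value}, or None if not encodable."""
    # Symbolic simulation: each cell holds a bitmask of seed variables whose XOR
    # gives the cell's current value; XOR of cell values = XOR of masks over GF(2).
    state = [1 << i for i in range(n)]            # cell i initially equals seed bit i
    rows = []                                     # linear system: [coefficient mask, value]
    for t in range(max(cube) + 1):
        if t in cube:
            rows.append([state[-1], cube[t]])     # bit observed at time t
        feedback = 0
        for tap in taps:                          # feedback = XOR of tapped cells
            feedback ^= state[tap]
        state = [feedback] + state[:-1]           # shift by one position
    # Gaussian elimination over GF(2)
    pivot_rows = []
    for col in range(n):
        p = next((i for i, r in enumerate(rows)
                  if i not in pivot_rows and (r[0] >> col) & 1), None)
        if p is None:
            continue
        pivot_rows.append(p)
        for i, r in enumerate(rows):
            if i != p and (r[0] >> col) & 1:
                r[0] ^= rows[p][0]
                r[1] ^= rows[p][1]
    if any(r[0] == 0 and r[1] == 1 for r in rows):
        return None                               # cube not encodable with this polynomial
    seed = [0] * n                                # free seed variables default to 0
    for i in pivot_rows:
        mask, val = rows[i]
        seed[(mask & -mask).bit_length() - 1] = val
    return seed

# Invented example: 8-stage LFSR, four care bits; prints a valid seed or None.
print(lfsr_seed_for_cube(taps=[0, 2, 3, 4], n=8, cube={0: 1, 3: 0, 7: 1, 10: 1}))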
49. A Unified Method for Assembling Global Test Schedules
Stroele, A.P. and Wunderlich, H.-J.
Proceedings of the 3rd Asian Test Symposium (ATS'94), Nara, Japan, 15-17 November 1994, pp. 268-273
1994
DOI PDF 
Keywords: Built-in self-test, data path, synthesis for testability, test scheduling
Abstract: In order to make a register transfer structure testable, it is usually divided into functional blocks that can be tested independently by various test methods. The test patterns are shifted in or generated autonomously at the inputs of each block. The test responses of a block are compacted or observed at its output register. In this paper a unified method for assembling all the single tests into a global schedule is presented. It is compatible with a variety of different test methods. The described scheduling procedures reduce the overall test time and minimize the number of internal registers that have to be made directly observable.
BibTeX:
@inproceedings{StroeW1994a,
  author = {Stroele, Albrecht P. and Wunderlich, Hans-Joachim},
  title = {{A Unified Method for Assembling Global Test Schedules}},
  booktitle = {Proceedings of the 3rd Asian Test Symposium (ATS'94)},
  year = {1994},
  pages = {268--273},
  keywords = {Built-in self-test, data path, synthesis for testability, test scheduling},
  abstract = {In order to make a register transfer structure testable, it is usually divided into functional blocks that can be tested independently by various test methods. The test patterns are shifted in or generated autonomously at the inputs of each block. The test responses of a block are compacted or observed at its output register. In this paper a unified method for assembling all the single tests to a global schedule is presented. It is compatible with a variety of different test methods. The described scheduling procedures reduce the overall test time and minimize the number of internal registers that have to be made directly observable.},
  doi = {http://dx.doi.org/10.1109/ATS.1994.367220},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/1994/ATS_StroeW1994.pdf}
}
48. An Efficient Procedure for the Synthesis of Fast Self-Testable Controller Structures
Hellebrand, S. and Wunderlich, H.-J.
Proceedings of the ACM/IEEE International Conference on Computer-Aided Design (ICCAD'94), San Jose, California, USA, 6-10 November 1994, pp. 110-116
1994
DOI URL PDF 
Abstract: The BIST implementation of a conventionally synthesized controller in most cases requires the integration of an additional register only for test purposes. This leads to some serious drawbacks concerning the fault coverage, the system speed, and the area overhead. A synthesis technique is presented which uses the additional test register also to implement the system function by supporting self-testable pipeline-like controller structures. It will be shown that if the need for two different registers in the final structure is already taken into account during synthesis, then the overall number of flipflops can be reduced, and the fault coverage and the system speed can be enhanced. The presented algorithm constructs realizations of a given finite state machine specification which can be trivially implemented by a self-testable structure. The efficiency of the procedure is ensured by a very precise characterization of the space of suitable realizations, which avoids the computational overhead of previously published algorithms.
BibTeX:
@inproceedings{HelleW1994a,
  author = {Hellebrand, Sybille and Wunderlich, Hans-Joachim},
  title = {{An Efficient Procedure for the Synthesis of Fast Self-Testable Controller Structures}},
  booktitle = {Proceedings of the ACM/IEEE International Conference on Computer-Aided Design (ICCAD'94)},
  publisher = {IEEE Computer Society},
  year = {1994},
  pages = {110--116},
  abstract = {The BIST implementation of a conventionally synthesized controller in most cases requires the integration of an additional register only for test purposes. This leads to some serious drawbacks concerning the fault coverage, the system speed and the area overhead. A synthesis technique is presented, which uses the additional test register also to implement the system function by supporting self-testable pipeline-like controller structures. It will be shown, that if the need of two different registers in the final structure is already taken into account during synthesis, then the overall number of flipflops can be reduced,and the fault coverage and the system speed can be enhanced. The presented algorithm constructs realizations of a given finite state machine specification which can be trivially implemented by a self-testable structure. The efficiency of the procedure is ensured by a very precise characterization of the space ofsuitable realizations, which avoids the computational overhead of previously published algorithms.},
  url = {http://dl.acm.org/citation.cfm?id=191326.191371},
  doi = {http://dx.doi.org/10.1109/ICCAD.1994.629752},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/1994/ICCAD_HelleW1994a.pdf}
}
47. Simulation Results of an Efficient Defect Analysis Procedure
Stern, O. and Wunderlich, H.-J.
Proceedings of the 25th IEEE International Test Conference (ITC'94), Washington, DC, USA, 2-6 October 1994, pp. 729-738
1994
DOI URL PDF 
Abstract: For obtaining a zero defect level, a high fault coverage with respect to the stuck-at fault model is often not sufficient as there are many defects that show a more complex behavior. In this paper, a method is presented for computing the occurrence probabilities of certain defects and the realistic fault coverages for test sets. The method is highly efficient as a pre-processing step is used for partitioning the layout and extracting the defects ranked in the order of their occurrence probabilities.
The method was applied to a public domain library where defects causing a complex faulty behavior are possible. The occurrence probability of these faults was computed, and the defect coverage for different test sets was determined.
BibTeX:
@inproceedings{SternW1994,
  author = {Stern, Olaf and Wunderlich, Hans-Joachim},
  title = {{Simulation Results of an Efficient Defect Analysis Procedure}},
  booktitle = {Proceedings of the 25th IEEE International Test Conference (ITC'94)},
  publisher = {IEEE Computer Society},
  year = {1994},
  pages = {729--738},
  abstract = {For obtaining a zero defect level, a high fault coverage with respect to the stuck-at fault model is often not sufficient as there are many defects that show a more complex behavior. In this paper, a method is presented for computing the occurrence probabilities of certain defects and the realistic fault coverages for test sets. The method is highly efficient as a pre-processing step is used for partitioning the layout and extracting the defects ranked in the order of their occurrence probabilities.
The method was applied to a public domain library where defects causing a complex faulty behavior are possible. The occurrence probability of these faults was computed, and the defect coverage for different test sets was determined.},
  url = {http://dl.acm.org/citation.cfm?id=648016.746228},
  doi = {http://dx.doi.org/10.1109/TEST.1994.528019},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/1994/ITC_SternW1994.pdf}
}
46. Configuring Flip-Flops to BIST Registers
Stroele, A.P. and Wunderlich, H.-J.
Proceedings of the 25th IEEE International Test Conference (ITC'94), Washington, DC, USA, 2-6 October 1994, pp. 939-948
1994
DOI URL PDF 
Abstract: For a built-in self-test, test registers must segment a circuit such that a feasible test schedule exists. If a register transfer description is used for selecting the positions of test registers, the space for optimization is small. In this paper, 1-bit test cells are inserted at gate level, and an initial test schedule is constructed. Based on the information of this schedule, test cells that can be controlled in the same way are assembled into test registers. Finally, a test schedule at RT level is constructed and a minimal set of test control signals is determined. The presented approach can reduce both BIST hardware overhead and test application time. It is applicable to control units and circuits produced by control-oriented synthesis where an RT description is not available. Considerable gains can also be obtained if existing RT structures are reconfigured for self-testing in the described way.
BibTeX:
@inproceedings{StroeW1994,
  author = {Stroele, Albrecht P. and Wunderlich, Hans-Joachim},
  title = {{Configuring Flip-Flops to BIST Registers}},
  booktitle = {Proceedings of the 25th IEEE International Test Conference (ITC'94)},
  publisher = {IEEE Computer Society},
  year = {1994},
  pages = {939--948},
  abstract = {Built-in self-test test registers must segment a circuit such that there exists a feasible test schedule. If a register transfer description is used for selecting the positions of test registers, the space for optimization is small. In this paper, 1-bit test cells are inserted at gate level, and an initial test schedule is constructed. Based on the information of this schedule, test cells that can be controlled in the same way are assembled to test registers. Finally, a test schedule at RT level is constructed and a minimal set of test control signals is determined. The presented approach can reduce both BIST hardware overhead and test application time. It is applicable to control units and circuits produced by control oriented synthesis where an RT description is not available. Considerable gains can also be obtained if existing RT structures are reconfigured for self-testing in the described way.},
  url = {http://dl.acm.org/citation.cfm?id=648016.761091},
  doi = {http://dx.doi.org/10.1109/TEST.1994.528043},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/1994/ITC_StroeW1994.pdf}
}
45. Synthesis of Self-Testable Controllers
Hellebrand, S. and Wunderlich, H.-J.
Proceedings of the European Design Automation Conference (EDAC/ETC/EuroAsic'94), Paris, France, 28 February-3 March 1994, pp. 580-585
1994
DOI PDF 
Abstract: The paper presents a synthesis approach for pipeline-like controller structures. These structures allow a built-in self-test to be implemented in two sessions without any extra test registers. Hence the additional delay imposed by the test circuitry is reduced, the fault coverage is increased, and in many cases the overall area is minimal, too. The self-testable structure for a given finite state machine specification is derived from an appropriate realization of the machine. A theorem is proven that such realizations can be constructed by means of partition pairs. An algorithm to determine the optimal realizations is developed, and benchmark experiments are presented to demonstrate the applicability of the presented approach.
BibTeX:
@inproceedings{HelleW1994,
  author = {Hellebrand, Sybille and Wunderlich, Hans-Joachim},
  title = {{Synthesis of Self-Testable Controllers}},
  booktitle = {Proceedings of the European Design Automation Conference (EDAC/ETC/EuroAsic'94)},
  publisher = {IEEE Computer Society},
  year = {1994},
  pages = {580--585},
  abstract = {The paper presents a synthesis approach for pipeline-like controller structures. These structures allow to implement a built-in self-test in two sessions without any extra test registers. Hence the additional delay imposed by the testcircuitry is reduced, the fault coverage is increased, and in many cases the overall area is minimal, too. The self-testable structure for a given finite state machine specification is derived from an appropiate realization of the machine. A theorem is proven that such realizations can be constructed by means of partition pairs. An algorithm to determine the optimal realizations is developed and benchmark experiments are presented to demonstrate the applicability of the presented approach.},
  doi = {http://dx.doi.org/10.1109/EDTC.1994.326815},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/1994/EDAC_HelleW1994.pdf}
}
44. An Efficient BIST Scheme Based on Reseeding of Multiple Polynomial Linear Feedback Shift Registers
Venkataraman, S., Rajski, J., Hellebrand, S. and Tarnick, S.
Proceedings of the ACM/IEEE International Conference on Computer-Aided Design (ICCAD'93), Santa Clara, California, USA, 7-11 November 1993, pp. 572-577
1993
DOI PDF 
Abstract: In this paper we describe an optimized BIST scheme based on reseeding of multiple-polynomial Linear Feedback Shift Registers (LFSRs). The same LFSR that is used to generate pseudo-random patterns is loaded with seeds from which it produces vectors that cover the test cubes of difficult-to-test faults. The scheme is compatible with scan design and achieves full coverage as it is based on random patterns combined with a deterministic test set. A method for processing the test set to allow for efficient encoding by the scheme is described. Algorithms for calculating LFSR seeds from the test set and for the selection and ordering of polynomials are described. Experimental results are provided for ISCAS-89 benchmark circuits to demonstrate the effectiveness of the scheme. The scheme allows an excellent trade-off between test data storage and test application time (number of test patterns) with a very small hardware overhead. We show the trade-off between test data storage and number of test patterns under the scheme.
BibTeX:
@inproceedings{VenkaRHT1993,
  author = {Venkataraman, Srikanth and Rajski, Janusz and Hellebrand, Sybille and Tarnick, Steffen},
  title = {{An Efficient BIST Scheme Based on Reseeding of Multiple Polynomial Linear Feedback Shift Registers}},
  booktitle = {Proceedings of the ACM/IEEE International Conference on CAD-93 (ICCAD'93)},
  year = {1993},
  pages = {572--577},
  abstract = {In this paper we describe an optimized BIST scheme based on reseeding of multiple polynomial Linear Feedback Shift Registers (LFSRs). The same LFSR that is used to generate pseudo-random patterns, is loaded with seed from which it produces vectors that cover the testcube of difficult to test faults. The scheme is compatible with scan-design and achieves full coverage as it is based onrandom patterns combined with a deterministic test set. A method for processing the test set to allow for efficient encoding by the scheme is described. Algorithms for calculating LFSR seeds from the test set and for the selection and ordering of polynomials are described. Experimental results are provided for ISCAS-89 benchmark circuits to demonstrate the effectiveness of the scheme. The scheme allows an excellent trade-off between test data storage and test application time (number of test patterns) with a very small hardware overhead. We show the trade-off between test data storage and number of test patterns under the scheme.},
  doi = {http://dx.doi.org/10.1109/ICCAD.1993.580117},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/1993/ICCAD_VenkaRHT1993.pdf}
}
43. Generation of Vector Patterns through Reseeding of Multiple-Polynomial Linear Feedback Shift Registers
Hellebrand, S., Tarnick, S., Rajski, J. and Courtois, B.
Proceedings of the 23rd IEEE International Test Conference (ITC'92), Baltimore, Maryland, USA, 20-24 September 1992, pp. 120-129
1992
DOI URL PDF 
Abstract: In this paper we perform a comparative analysis of the encoding efficiency of BIST schemes based on reseeding of single-polynomial LFSRs as well as LFSRs with fully programmable polynomials. Full programmability gives much better encoding efficiency. For a test cube with s care bits we need only s+4 bits in contrast to s+19 bits for reseeding of single polynomials, but since it involves solving systems of nonlinear equations it is not applicable to realistic cases. We propose a new BIST scheme where the generator can operate according to a number of primitive polynomials. The test cubes are encoded as the polynomial identifier and a seed. We present models of the encoding efficiency of this scheme and demonstrate, both theoretically and through extensive simulations, that such a scheme with 16 polynomials approaches the efficiency of the scheme based on full polynomial programmability, essentially preserving the computational simplicity of single-polynomial reseeding.
BibTeX:
@inproceedings{HelleTTC1992,
  author = {Hellebrand, Sybille and Tarnick, Steffen and Rajski, Janusz and Courtois, Bernard},
  title = {{Generation of Vector Patterns through Reseeding of Multiple-Polynomial Linear Feedback Shift Registers}},
  booktitle = {Proceedings of the 23rd IEEE International Test Conference (ITC'92)},
  publisher = {IEEE Computer Society},
  year = {1992},
  pages = {120--129},
  abstract = {In this paper we perform a comparative analysis of the encoding efficiency of BIST schemes based on reseeding of single polynomial LFSRs as well as LFSRs with fully programmable polynomials. Full programmability gives much better encoding efficiency. For a testcube with s carebits we need only s+4 bits in contrast to s+19 bits for reseeding of single polynomials, but since it involves solving systems of nonlinear equations it is not applicable to realistic cases. We propose a new BIST scheme where the generator can operate according to a number of primitve polynomials. The testcubes are encoded as the polynomial identifier and a seed. We present models of the encoding efficiency of this scheme and demonstrate, both theoretically and through extensive simulations, that such a scheme with 16 polynomials approaches the efficiency of the schemebased on full polynominal programmability, essentially preserving the computational simplicity of single reseeding.},
  url = {http://dl.acm.org/citation.cfm?id=648014.745236},
  doi = {http://dx.doi.org/10.1109/TEST.1992.527812},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/1992/ITC_HelleTTC1992.pdf}
}
42. Efficient Test Set Evaluation
Wunderlich, H.-J. and Warnecke, M.
Proceedings of the 3rd European Conference on Design Automation (EDAC'92), Brussels, Belgium, 16-19 March 1992, pp. 428-433
1992
DOI PDF 
Keywords: Test, fault simulation
Abstract: The fault coverage obtained by a set of test patterns is usually determined by expensive fault simulation. Even using fault dropping techniques, fault simulation provides more information than is actually needed: for each fault, the pattern which detects it first is determined. This is mainly redundant information if diagnosis is not required. We can dispense with this high resolution and restrict our interest to the set of faults which is detected by a set of patterns. It is shown theoretically and practically that this information is obtainable in a highly efficient way.
BibTeX:
@inproceedings{WundeW1992,
  author = {Wunderlich, Hans-Joachim and Warnecke, M.},
  title = {{Efficient Test Set Evaluation}},
  booktitle = {Proceedings of the 3rd European Conference on Design Automation (EDAC'92)},
  year = {1992},
  pages = {428--433},
  keywords = {Test, fault simulation},
  abstract = {The fault coverage obtained by a set of test patterns is usually determined by expensive fault simulation. Even using fault dropping techniques fault simulation provides more information than actually needed. For each fault the pattern is determined which detects this fault first. This is mainly redundant information if diagnosis is not required. We can dispense with this high resolution and restrict our interest on the set of faults which is detected by a set of patterns. It is shown theoretically and practically that this information is obtainable in an highly efficient way.},
  doi = {http://dx.doi.org/10.1109/EDAC.1992.205970},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/1992/EDAC_WundeW1992.pdf}
}
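The point made in the abstract above, that plain coverage evaluation only needs the set of detected faults rather than the first detecting pattern per fault, can be illustrated with a small sketch. This is not the paper's algorithm; the detects() callback stands in for a real fault simulator, and the toy fault model below is invented.

def detected_faults(patterns, faults, detects):
    """Return the set of faults detected by the pattern set; no per-fault
    first-detecting pattern is recorded."""
    remaining = set(faults)
    detected = set()
    for p in patterns:
        newly = {f for f in remaining if detects(p, f)}
        detected |= newly
        remaining -= newly            # fault dropping: detected faults are not re-simulated
        if not remaining:
            break
    return detected

# Toy model: a "fault" is a bit index, and a pattern detects it if that bit is set.
patterns = [0b1010, 0b0101, 0b0010]
faults = [0, 1, 2, 3]
hit = detected_faults(patterns, faults, lambda p, f: (p >> f) & 1)
print(len(hit) / len(faults))         # fault coverage of the pattern set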
41. Optimized Synthesis Techniques for Testable Sequential Circuits
Eschermann, B. and Wunderlich, H.-J.
IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems (TCAD)
Vol. 11(3), March 1992, pp. 301-312
1992
DOI PDF 
Abstract: Innovative synthesis for testability strategies aim at considering testability while synthesizing a circuit, whereas conventional design for testability methods modify the design after the circuit structure is synthesized. We describe a synthesis approach that maps a behavioral FSM description into a testable gate-level structure. The term "testable" in this context, besides implying the existence of tests, also means that the application of test patterns is facilitated. Depending on the test strategy, the state registers of the FSM are modified e.g. as scan path or self-test registers. The additional functionality of these state registers is utilized in system mode by interpreting them as "smart" state registers, capable of producing certain state transitions on their own. To make the best use of such registers, we propose a novel state encoding strategy based on an analytic formulation of the coding constraint satisfaction problem as a quadratic assignment problem. An additional minimization potential can be exploited by appropriately choosing the pattern generator for self-testable designs. Experimental results indicate that, compared with conventional design for testability approaches, significant savings are possible this way.
BibTeX:
@article{EscheW1992,
  author = {Eschermann, Bernhard and Wunderlich, Hans-Joachim},
  title = {{Optimized Synthesis Techniques for Testable Sequential Circuits}},
  journal = {IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems (TCAD)},
  year = {1992},
  volume = {11},
  number = {3},
  pages = {301--312},
  abstract = {Innovative synthesis for testability strategies aim at considering testability while synthesizing a ciruit, whereas conventional design for testability methods modify the design after the circuit structure is synthesized. We describe a synthesis approach that maps a behavioral FSM description into a testable gate-level structure. The term "testable" in this context, besides implying the existence of tests, also means that the application of test patterns is facilitated. Depending on the test strategy, the state registers of the FSM are modified e.g. as scan path or self-test registers. The additional functionality of these state registers is utilized in system mode by interpreting them as "smart" state registers, capable of producing certain state transitions on their own. To make the best use of such registers, we propose a novel state encoding strategy based on an analytic formulation of the coding constraint satisfaction problem as a quadratic assignment problem. An additional minimization potential can be exploited by appropriately choosing the pattern generator for self-testable designs. Experimental results indicate that, compared with conventional design for testabiltiy approaches, significant savings are possible this way.},
  doi = {http://dx.doi.org/10.1109/43.124417},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/1992/TCAD_EscheW1992.pdf}
}
40. Erfassung und Modellierung komplexer Funktionsfehler in Mikroelektronik-Bauelementen
Stern, O. and Wunderlich, H.-J.
5. ITG-Fachtagung Mikroelektronik für die Informationstechnik
Vol. 119, March 1992, pp. 117-122
1992
DOI PDF 
Keywords: Manufacturing test, fault modeling, yield, test generation
Abstract: A method is presented which determines, depending on the layout, the possible faulty functions of the basic cells of a cell library that can be caused by manufacturing defects. Besides the layout of a cell, the inputs to the method are the process parameters and the defect distributions. The output are the realistic faulty functions together with their occurrence probabilities. With this information, test generation and test application can be accelerated, hard-to-test faults can be identified, and their causes can be localized and removed.
BibTeX:
@inproceedings{SternW1992,
  author = {Stern, Olaf and Wunderlich, Hans-Joachim},
  title = {{Erfassung und Modellierung komplexer Funktionsfehler in Mikroelektronik-Bauelementen}},
  booktitle = {5. ITG-Fachtagung Mikroelektronik für die Informationstechnik},
  publisher = {VDE-Verlag Stuttgart},
  year = {1992},
  volume = {119},
  pages = {117--122},
  keywords = {Fertigungstest, Fehlermodellierung, Ausbeute, Testerzeugung},
  abstract = {Es wird ein Verfahren vorgestellt, das für die Grundzellen einer Zellbibliothek layoutabhängig die möglichen Fehlfunktionen bestimmt, die durch Fertigungsfehler verursacht werden können. Eingabe für das Verfahren sind neben dem Layout einer Zelle die Prozessparameter und die Defektverteilungen. Ausgabe sind die realistischen Fehlfunktionen mit ihren Auftrittswahrscheinlichkeiten. Damit können Testerzeugung und Testablauf beschleunigt, schwer testbare Fehler bestimmt und ihre Ursachen lokalisiert und beseitigt werden.},
  doi = {http://dx.doi.org/10.18419/opus-7903},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/1992/ITG_SternW1992.pdf}
}
39. Prüfgerechter Entwurf und Test hochintegrierter Schaltungen
Wunderlich, H.-J. and Schulz, M.H.
Informatik-Spektrum
Vol. 15(1), March 1992, pp. 23-32
1992
DOI PDF 
Keywords: Design for testability, test, self-test, highly integrated circuits
Abstract: This article gives an overview of the most important test strategies relevant in practice, where a test strategy comprises not only the procedures for test set generation and for the actual test application, but also the underlying fault model and the design-for-test measures that are a prerequisite for applying these procedures. The most common methods for conventional external testing are presented and evaluated, and the principle of the increasingly widely applied self-test methods and their advantages are explained. After a brief outlook on the progress that methods for the automatic synthesis of testable circuits promise, aspects of system test are finally discussed, in particular the boundary scan principle and its associated advantages.
BibTeX:
@article{WundeS1992,
  author = {Wunderlich, Hans-Joachim and Schulz, Michael H.},
  title = {{Prüfgerechter Entwurf und Test hochintegrierter Schaltungen}},
  journal = {Informatik-Spektrum},
  year = {1992},
  volume = {15},
  number = {1},
  pages = {23--32},
  keywords = {Prüfgerechter Entwurf, Test, Selbsttest, hochintegrierte Schaltungen},
  abstract = {Der Beitrag gibt einen Überblick über die wichtigsten praxisrelevanten Teststrategien, wobei unter einer Teststrategie nicht nur die Verfahren zur Testsatzerzeugung und zur eigentlichen Testdurchführung, sondern auch das zugrunde liegende Fehlermodell und die erforderlichen testfreundlichen Entwurfsmaßnahmen, die die Voraussetzung für die Anwendung dieser Verfahren darstellen, zu verstehen sind. Es werden die gängisten Methoden zum konventionellen externen Test vorgestellt und bewertet sowie das Prinzip der immer breitere Anwendung findenden Selbsttestmethoden und ihre Vorteile erläurtert. Nach einem kurzen Ausblick auf die Fortschritte, die Verfahren zur automatischen Synthese testbarer Schaltungen erhoffen lassen, werden schließlich Aspekte des Systemtests und insbesondere das Boundary-Scan-Prinzip und die damit verbundenen Vorteile diskutiert.},
  doi = {http://dx.doi.org/10.18419/opus-7897},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/1992/IS_WundeS1992.pdf}
}
38. The Pseudoexhaustive Test of Sequential Circuits
Wunderlich, H.-J. and Hellebrand, S.
IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems (TCAD)
Vol. 11(1), January 1992, pp. 26-33
1992
DOI PDF 
Abstract: The concept of a pseudoexhaustive test for sequential circuits is introduced in a way similar to that which is used for combinational networks. Using partial scan, all cycles in the data flow of a sequential circuit are removed, such that a compact combinational model can be constructed. Pseudoexhaustive test sequences for the original circuit are constructed from a pseudoexhaustive test set for this model. To make this concept feasible for arbitrary circuits, a technique for circuit segmentation is presented which provides special segmentation cells as well as the corresponding algorithms for the automatic placement of the cells. Example circuits show that the presented test strategy requires less additional silicon area than a complete scan path. Thus the advantages of a partial scan path are combined with the well-known benefits of a pseudoexhaustive test, such as high fault coverage and simplified test generation.
BibTeX:
@article{WundeH1992,
  author = {Wunderlich, Hans-Joachim and Hellebrand, Sybille},
  title = {{The Pseudoexhaustive Test of Sequential Circuits}},
  journal = {IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems (TCAD)},
  year = {1992},
  volume = {11},
  number = {1},
  pages = {26--33},
  abstract = {The concept of a pseudoexhaustive test for sequential circuits is introduced in a way similar to that which is used for combinational networks. Using partial scan all cycles in the data flow of a sequential circuit are removed, such that a compact combinational model can be constructed. Pseudoexhaustive test sequences for the original circuit are constructed from a pseudoexhaustive test set for this model. To make this concept feasible for arbitrary circuits a technique for circuit segmentation is presented which provides special segmentation cells as well as the corresponding algorithms for the automatic placement of the cells. Example circuits show that the presented test strategyrequires less additional silicon area than a complete scan path. Thus the advantages of a partial scan path are combined with the well-known benefits of a pseudoexhaustive test, such as high fault coverage and simplified test generation.},
  doi = {http://dx.doi.org/10.1109/43.108616},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/1992/TCAD_WundeH1992.pdf}
}
37. A Common Approach to Test Generation and Hardware Verification Based on Temporal Logic
Kropf, T. and Wunderlich, H.-J.
Proceedings of the 22nd IEEE International Test Conference (ITC'91), Nashville, Tennessee, USA, 26-30 October 1991, pp. 57-66
1991
DOI URL PDF 
Abstract: Hardware verification and sequential test generation are aspects of the same problem, namely to prove the equal behavior determined by two circuit descriptions. During test generation, this attempt succeeds for the faulty and fault-free circuit if redundancy exists, and during verification it succeeds if the implementation is correct with regard to its specification. This observation can be used to cross-fertilize both areas, which have been treated separately up to now. In this paper, a common formal framework for hardware verification and sequential test pattern generation is presented, which is based on modeling the circuit behavior with temporal logic. In addition, a new approach to cope with non-resettable flipflops in sequential test generation is proposed, which is not restricted to stuck-at faults. Based on this verification view, it is possible to provide the designer with one tool for checking circuit correctness and generating test patterns. Its first implementation and application are also described.
BibTeX:
@inproceedings{KropfW1991,
  author = {Kropf, Thomas and Wunderlich, Hans-Joachim},
  title = {{A Common Approach to Test Generation and Hardware Verification Based on Temporal Logic}},
  booktitle = {Proceedings of the 22nd IEEE International Test Conference (ITC'91)},
  publisher = {IEEE Computer Society},
  year = {1991},
  pages = {57--66},
  abstract = {Hardware verification and sequential test generation are aspects of the same problem, namely to prove the equal behavior determined by two circuit descriptions. During test generation, this attempt succeeds for the faulty and fault free circuit if redundancy exists, and during verification it succeeds, if the implementation is correct with regard to its specification. This observation can be used to cross-fertilize both areas, which have been treated separately up to now. In this paper, a common formal framework for hardware verification and sequential test pattern generation is presented, which is based on modeling the circuit behavior with temporal logic. In addition, a new approach to cope with non resetable flipflops in sequential test generation is proposed, which is not restricted to stuck-at faults. Based on this verification view, it is possible to provide the designer with one tool for checking circuit correctness and generating test patterns. Its first impelmentation and application is also described.},
  url = {http://dl.acm.org/citation.cfm?id=648013.744749},
  doi = {http://dx.doi.org/10.1109/TEST.1991.519494},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/1991/ITC_KropfW1991.pdf}
}
36. Emulation of Scan Paths in Sequential Circuit Synthesis
Eschermann, B. and Wunderlich, H.-J.
Proceedings of the 5th International GI/ITG/GMA Conference on Fault-Tolerant Computing Systems, Tests, Diagnosis, Fault Treatment, Nürnberg, Germany, September 1991, pp. 136-147
1991
DOI URL PDF 
Abstract: Scan paths are generally added to a sequential circuit in a final design for testability step. We present an approach to incorporate the behavior of a scan path during circuit synthesis, thus avoiding the implementation of the scan path shift register as a separate structural entity. The shift transitions of the scan path are treated as a part of the system functionality. Depending on the minimization strategy for the system logic, either the delay or the area of the circuit can be reduced compared to a conventional scan path, which may be interpreted as a special case of realizing the combinational logic. The approach is also extended to partial scan paths. It is shown that the resulting structure is fully testable and test patterns can be efficiently produced by a combinational test generator. The advantages of the approach are illustrated with a collection of finite state machine examples.
BibTeX:
@inproceedings{EscheW1991b,
  author = {Eschermann, Bernhard and Wunderlich, Hans-Joachim},
  title = {{Emulation of Scan Paths in Sequential Circuit Synthesis}},
  booktitle = {Proceedings of the 5th International GI/ITG/GMA Conference on Fault-Tolerant Computing Systems, Tests, Diagnosis, Fault Treatment},
  publisher = {Springer-Verlag},
  year = {1991},
  pages = {136--147},
  abstract = {Scan paths are generally added to a sequential circuit in a final design for testability step. We present an approach to incorporate the behavior of a scan path during circuit synthesis, thus avoiding to implement the scan path shift register as a separate structural entity. The shift transitions of the scan path are treated as a part of the system functionality. Depending on the minimization strategy for the system logic, either the delay or the area of the circuit can be reduced compared to a conventional scan path. which may be interpreted as a special case of realizing the combinational logic. The approach is also extended to partial scan paths. It is shown that the resulting structure is fully testable and test patterns can be efficiently produced by a combinational test generator. The advantages of the approach are illustrated with a collection of finite state machine examples.},
  url = {http://dl.acm.org/citation.cfm?id=646859.759726},
  doi = {http://dx.doi.org/10.18419/opus-7904},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/1991/FTCS_EscheW1991.pdf}
}
35. TESTCHIP: A Chip for Weighted Random Pattern Generation, Evaluation, and Test Control
Ströle, A.P. and Wunderlich, H.-J.
IEEE Journal of Solid-State Circuits
Vol. 26(7), July 1991, pp. 1056-1063
1991
DOI PDF 
Keywords: Built-off test, low-cost test, multiple weights, random test, test equipment
Abstract: In self-testable circuits additional hardware is incorporated for generating test patterns and evaluating test responses. In this paper a built-off test strategy is presented which moves the additional hardware to a programmable extra chip. This is a low-cost test strategy in three ways:
1) the use of random patterns eliminates the expensive test pattern computation;
2) a microcomputer and an ASIC replace the expensive automatic test equipment; and
3) the design for testability overheads are minimized.
The presented ASIC generates random patterns, applies them to a circuit under test, and evaluates the test responses by signature analysis. It contains a hardware structure that can produce weighted random patterns corresponding to multiple programmable distributions. These patterns give a high fault coverage and allow short test lengths. A wide range of circuits can be tested as the only requirement is a scan path and no other test structures have to be built in.
BibTeX:
@article{StroelW1991,
  author = {Ströle, Albrecht P. and Wunderlich, Hans-Joachim},
  title = {{TESTCHIP: A Chip for Weighted Random Pattern Generation, Evaluation, and Test Control}},
  journal = {IEEE Journal of Solid-State Circuits},
  year = {1991},
  volume = {26},
  number = {7},
  pages = {1056--1063},
  keywords = {Built-off test, low-cost test, multiple weights, random test, test equipment},
  abstract = {In self-testable circuits additional hardware is incorporated for generating test patterns and evaluating test responses. In this paper a built-off test strategy is presented which moves the additional hardware to a programmable extra chip. This is a low-cost test strategy in three ways:
1) the use of random patterns eliminates the expensive test pattern computation;
2) a microcomputer and an ASIC replace the expensive automatic test equipment; and
3) the design for testability overheads are minimized.
The presented ASIC generates random patterns, applies them to a circuit under test, and evaluates the test responses by signature analysis. It contains a hardware structure that can produce weighted random patterns corresponding to multiple programmable distributions. These patterns give a high fault coverage and allow short test lengths. A wide range of circuits can be tested as the only requirement is a scan path and no other test structures have to be built in.},
  doi = {http://dx.doi.org/10.1109/4.92026},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/1991/IJSSC_StroeW1991.pdf}
}
34. Signature Analysis and Test Scheduling for Self-Testable Circuits
Ströle, A.P. and Wunderlich, H.-J.
Proceedings of the 21st International Symposium on Fault-Tolerant Computing (FTCS-21), Montreal, Canada, 25-27 June 1991, pp. 96-103
1991
DOI PDF 
Keywords: Aliasing, built-in self-test, signature analysis, test scheduling
Abstract: Usually in complex circuits the test execution is divided into a number of subtasks, each producing a signature in a self-test register. These signatures influence one another. In this paper it is shown how test schedules can be constructed in order to minimize the number of signatures to be evaluated. The error masking probabilities decrease when the subtasks of the test execution are repeated in an appropriate order, and an equilibrium situation is reached where the error masking probabilities are minimal. A method is presented for constructing test schedules such that only the signatures at the primary outputs must be evaluated to get a sufficient fault coverage. Then no internal scan path is required, only a few signatures have to be evaluated at the end of the test execution, and the test control at chip and board level is simplified. The amount of hardware to implement a built-in self-test is reduced significantly.
BibTeX:
@inproceedings{StroelW1991a,
  author = {Ströle, Albrecht P. and Wunderlich, Hans-Joachim},
  title = {{Signature Analysis and Test Scheduling for Self-Testable Circuits}},
  booktitle = {Proceedings of the 21st International Symposium on Fault-Tolerant Computing (FTCS-21)},
  year = {1991},
  pages = {96--103},
  keywords = {Aliasing, built-in self-test, signature analysis, test scheduling},
  abstract = {Usually in complex circuits the test execution is divided into a number of subtasks, each producing a signature in a self-test register. These signatures influence one another. In this paper it is shown how test schedules can be constructed, in order to minimize the number of signatures to be evaluated. The error masking probabilities decrease, when the subtasks of the test execution are repeated in an appropriate order, and an equilibrium situation is reached where the error masking probabilities are minimal. A method is presented for constructing test schedules such that only the signatures at the primary outputs must be evaluated to get a sufficient fault coverage. Then no internal scan path is required, only few signatures have to be evaluated at the end of the test execution, and the test control at chip and board level is simplified. The amount of hardware to implement a built-in self-test is reduced significantly.},
  doi = {http://dx.doi.org/10.1109/FTCS.1991.146640},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/1991/FTCS_StroeW1991.pdf}
}
33. Maximizing the Fault Coverage in Complex Circuits by Minimal Number of Signatures
Wunderlich, H.-J. and Ströle, A.P.
Proceedings of the IEEE International Symposium on Circuits and Systems (ISCAS'91)
Vol. 3, Singapore, 11-14 June 1991, pp. 1881-1884
1991
DOI PDF 
Keywords: BIST, signature analysis, test scheduling
Abstract: Many self-test strategies use signature analysis to compress the test responses. In complex circuits the test execution is divided into a number of subtasks, each producing a signature in a self-test register. Whereas the conventional approach is to evaluate all these signatures, this paper presents methods to minimize the number of evaluated signatures without reducing the fault coverage. This is possible, since the signatures can influence one another during the test execution. For a fixed test schedule a minimal subset of signatures can be selected, and for a predetermined minimal subset of signatures the test schedule can be constructed such that the fault coverage is maximum. Both approaches result in significant hardware savings when a self-test is implemented.
BibTeX:
@inproceedings{WundeS1991,
  author = {Wunderlich, Hans-Joachim and Ströle, Albrecht P.},
  title = {{Maximizing the Fault Coverage in Complex Circuits by Minimal Number of Signatures}},
  booktitle = {Proceedings of the IEEE International Symposium on Circuits and Systems (ISCAS'91)},
  year = {1991},
  volume = {3},
  pages = {1881--1884},
  keywords = {BIST, signature analysis, test scheduling},
  abstract = {Many self-test strategies use signature analysis to compress the test responses. In complex circuits the test execution is divided into a number of subtasks, each producing a signature in a self-test register. Whereas the conventional approach is to evaluate all these signatures, this paper presents methods to minimize the number of evaluated signatures without reducing the fault coverage. This is possible, since the signatures can influence one another during the test execution. For a fixed test schedule a minimal subset of signatures can be selected, and for a predetermined minimal subset of signatures the test schedule can be constructed such that the fault coverage is maximum. Both approaches result in significant hardware savings when a self-test is implemented.},
  doi = {http://dx.doi.org/10.1109/ISCAS.1991.176774},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/1991/ISCAS_WundeS1991.pdf}
}
32. A Unified Approach for the Synthesis of Self-Testable Finite State Machines
Eschermann, B. and Wunderlich, H.-J.
Proceedings of the 28th ACM/IEEE Design Automation Conference (DAC'91), San Francisco, California, USA, June 1991, pp. 372-377
1991
DOI URL PDF 
Abstract: Conventionally self-test hardware is added after synthesis is completed. For highly sequential circuits like controllers this design method either leads to high hardware overheads or compromises fault coverage. In this paper we outline a unified approach for considering self-test hardware like pattern generators and signature registers during synthesis. Three novel target structures are presented, and a method for designing parallel self-test circuits is discussed in more detail. For a collection of benchmark circuits we show that hardware overheads for self-testable circuits can be significantly reduced this way without sacrificing testability.
BibTeX:
@inproceedings{EscheW1991,
  author = {Eschermann, Bernhard and Wunderlich, Hans-Joachim},
  title = {{A Unified Approach for the Synthesis of Self-Testable Finite State Machines}},
  booktitle = {Proceedings of the 28th ACM/IEEE Design Automation Conference (DAC'91)},
  year = {1991},
  pages = {372--377},
  abstract = {Conventionally self-test hardware is added after synthesis is completed. For highly sequential circuits like controllers this design method either leads to high hardware overheads or compromises fault coverage. In this paper we outline a unified approach for considering self-test hardware like pattern generators and signature registers during synthesis. Three novel target structures are presented, and a method for designing parallel self-test circuits is discussed in more detail. For a collection of benchmark circuits we show that hardware overheads for self-testable circuits can be significantly reduced this way without sacrificing testability.},
  url = {http://ieeexplore.ieee.org/xpl/freeabs_all.jsp?arnumber=979744},
  doi = {http://dx.doi.org/10.1145/127601.127697},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/1991/DAC_EscheW1991.pdf}
}
31. Parallel Self-Test and the Synthesis of Control Units
Eschermann, B. and Wunderlich, H.-J.
Proceedings of the 2nd European Test Conference (ETC'91), Munich, Germany, 10-12 April 1991, pp. 73-82
1991
DOI PDF 
Abstract: Most self-test techniques are implemented with so-called multifunctional test registers, which at any specific time are used either for pattern generation or for response analysis. In a parallel self-test, however, test registers are used for pattern generation and response analysis simultaneously. In this paper a novel circuit structure for controllers with parallel self-test is presented, which does not result in a loss of fault coverage. By using a dedicated synthesis procedure, which considers the self-test hardware while generating the circuit structure instead of adding it after the design is completed ("synthesis for testability"), the self-test overhead can be kept low. The structure also facilitates realistic dynamic tests. The IEEE boundary scan controller is used as an example to illustrate the approach.
BibTeX:
@inproceedings{EscheW1991a,
  author = {Eschermann, Bernhard and Wunderlich, Hans-Joachim},
  title = {{Parallel Self-Test and the Synthesis of Control Units}},
  booktitle = {Proceedings of the 2nd European Test Conference (ETC'91)},
  year = {1991},
  pages = {73--82},
  abstract = {Most self-test techniques are implemented with so-called multifunctional test registers at any specific time either used for pattern generation or for response analysis. In a parallel self-test, however, test registers are used for pattern generation and response analysis simultaneously. In this paper a novel circuit structure for controllers with parallel self-test is presented, which does not result in a loss of fault coverage. By using a dedicated synthesis procedure, which considers the self-test hardware while generating the circuit structure instead of adding it after the design is completed ("synthesis for testability") the self-test overhead can be kept low. The structure also facilitates realistic dynamic tests. As an example to illustrate the approach, the IEEE boundary scan controller is used.},
  doi = {http://dx.doi.org/10.18419/opus-7920},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/1991/ETC_EscheW1991.pdf}
}
30. TESTCHIP: A Chip for Weighted Random Pattern Generation, Evaluation, and Test Control
Ströle, A.P., Wunderlich, H.-J. and Haberl, O.F.
Proceedings of the 16th European Solid-State Circuits Conference (ESSCIRC'90)
Vol. 1, Grenoble, France, 19-21 September 1990, pp. 101-104
1990
DOI URL PDF 
Abstract: A chip is presented that generates weighted random patterns, applies them to a circuit under test, and evaluates the test responses. The generated test patterns correspond to multiple sets of weights. Test response evaluation is done by signature analysis. The chip can easily be connected to a microcomputer and thus constitutes the key element of low-cost test equipment.
BibTeX:
@inproceedings{StroelWH1990,
  author = {Ströle, Albrecht P. and Wunderlich, Hans-Joachim and Haberl, Oliver F.},
  title = {{TESTCHIP: A Chip for Weighted Random Pattern Generation, Evaluation, and Test Control}},
  booktitle = {Proceedings of the 16th European Solid-State Circuits Conference (ESSCIRC'90)},
  publisher = {IEEE Computer Society},
  year = {1990},
  volume = {1},
  pages = {101--104},
  abstract = {A chip is presented that generates weighted random patterns, applies them to a circuit under test and evaluates the test responses. The generated test patterns correspond to multiple sets of weights. Test response evaluation is done by signature analysis. The chip can easily be connected to a micro computer and thus constitutes the key element of a low-cost test equipment.},
  url = {http://ieeexplore.ieee.org/xpl/freeabs_all.jsp?arnumber=5467978},
  doi = {http://dx.doi.org/10.18419/opus-7921},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/1990/ESSCIRC_StroeWH1990.pdf}
}
29. Generating Pseudo-Exhaustive Vectors for External Testing
Hellebrand, S., Wunderlich, H.-J. and Haberl, O.F.
Proceedings of the 21st IEEE International Test Conference (ITC'90), Washington, DC, USA, 10-14 September 1990, pp. 670-679
1990
DOI PDF 
Keywords: pseudo-exhaustive test, built-off test, external low cost test
Abstract: In the past years special chips for external test have been successfully used for random pattern testing. In this paper a technique is presented to combine the advantages of such a low-cost test with the advantages of pseudo-exhaustive testing, which are an enhanced fault coverage and a simplified test pattern generation.
To achieve this goal two tasks are solved. Firstly, an algorithm is developed for pseudo-exhaustive test pattern generation, which ensures a feasible test length. Secondly, a chip design for applying these test patterns to a device under test is presented. The chip is programmed by the output of the presented algorithm and controls the entire test. The technique is first applied to devices with a scan path and then extended to sequential circuits. A large number of benchmark circuits have been investigated, and the results are presented.
BibTeX:
@inproceedings{HelleWH1990,
  author = {Hellebrand, Sybille and Wunderlich, Hans-Joachim and Haberl, Oliver F.},
  title = {{Generating Pseudo-Exhaustive Vectors for External Testing}},
  booktitle = {Proceedings of the 21st IEEE International Test Conference (ITC'90)},
  year = {1990},
  pages = {670--679},
  keywords = {pseudo-exhaustive test, built-off test, external low cost test},
  abstract = {In the past years special chips for external test have been successfully used for random pattern testing. In this paper a technique is presented to combine the advantages of such a low cost test with the advantages of pseudo-exhaustive testing, which are an enhanced fault coverage and a simplified test pattern generation.
To achieve this goal two tasks are solved. Firstly, an algorithm is developed for pseudo-exhaustive test pattern generation, which ensures a feasible test length. Secondly, a chip design for applying these test patterns to a device under test is presented. The chip is programmed by the output of the presented algorithm and controls the entire test. The technique is first applied to devices with a scan path and then extended to sequential circuits. A large number of benchmark circuits have been investigated, and the results are presented.},
  doi = {http://dx.doi.org/10.1109/TEST.1990.114082},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/1990/ITC_HelleWH1990.pdf}
}
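As a rough illustration of the pseudo-exhaustive idea (not the generation algorithm of this paper), the sketch below simply enumerates all input combinations of each output cone of a combinational circuit; the cone partition and signal names are hypothetical. A practical generator would additionally merge patterns across cones to keep the overall test length feasible.

from itertools import product

# Hypothetical cones: each output is driven by a small subset of the inputs.
cones = {
    "y0": ["a", "b", "c"],
    "y1": ["b", "c", "d"],
}

def pseudo_exhaustive_patterns(cones, all_inputs, default=0):
    """Yield input assignments that exercise every output cone exhaustively."""
    for out, ins in cones.items():
        for values in product((0, 1), repeat=len(ins)):
            pattern = {name: default for name in all_inputs}
            pattern.update(dict(zip(ins, values)))
            yield out, pattern

for out, pat in pseudo_exhaustive_patterns(cones, ["a", "b", "c", "d"]):
    print(out, pat)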
28. Error Masking in Self-Testable Circuits
Stroele, A.P. and Wunderlich, H.-J.
Proceedings of the 21st IEEE International Test Conference (ITC'90), Washington, DC, USA, 10-14 September 1990, pp. 544-552
1990
DOI PDF 
Keywords: Built-in self-test, error masking, fault coverage, signature analysis
Abstract: In a self-test environment signature analysis is used to compact the test responses. In large circuits the test execution is divided into a number of subtasks each producing a signature in a self-test register. Aliasing occurs, if a faulty response sequence leads to a correct signature in a signature register. Aliasing probabilities for single signature registers are widely investigated.
In this paper the effects of error masking in a multitude of signature registers are analysed. It is shown that a self-test can always be scheduled such that evaluating signatures only at the end of the complete test execution is sufficient. A method is presented to compute the probability that a fault leads to at least one faulty signature in a set of self-test registers. This method allows the computation of the fault coverage with respect to the complete test execution. A minimal subset of all self-test registers can be selected, so that only the signatures of these self-test registers have to be evaluated and the fault coverage is hardly affected.
The benefits of the approach are a smaller number of self-test registers in the scan path, a smaller number of signatures to be evaluated, a simplified test control unit, and hence a significant reduction of the hardware required for built-in self-test structures.
BibTeX:
@inproceedings{StroeW1990,
  author = {Stroele, Albrecht P. and Wunderlich, Hans-Joachim},
  title = {{Error Masking in Self-Testable Circuits}},
  booktitle = {Proceedings of the 21st IEEE International Test Conference (ITC'90)},
  year = {1990},
  pages = {544--552},
  keywords = {Built-in self-test, error masking, fault coverage, signature analysis},
  abstract = {In a self-test environment signature analysis is used to compact the test responses. In large circuits the test execution is divided into a number of subtasks each producing a signature in a self-test register. Aliasing occurs, if a faulty response sequence leads to a correct signature in a signature register. Aliasing probabilities for single signature registers are widely investigated. 
In this paper the effects of error masking in a multitude of signature registers are analysed. It is shown that a self-test can always be scheduled such that evaluating signatures only at the end of the complete test execution is sufficient. A method is presented to compute the probability that a fault leads to at least one faulty signature in a set of self-test registers. This method allows the computation of the fault coverage with respect to the complete test execution. A minimal subset of all self-test registers can be selected, so that only the signatures of these self-test registers have to be evaluated and the fault coverage is almost not affected.
The benefits of the approach are a smaller number of self-test registers in the scan path, a smaller number of signatures to be evaluated, a simplified test control unit, and hence a significant reduction of the hardware required for built-in self-test structures.},
  doi = {http://dx.doi.org/10.1109/TEST.1990.114066},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/1990/ITC_StroeW1990.pdf}
}
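The probability argument in the abstract can be made concrete with a back-of-the-envelope estimate: if a fault disturbs the input streams of several signature registers and each k-bit register aliases independently with a probability of roughly 2^-k, the fault escapes only if every affected register aliases. The register widths below are illustrative assumptions, and the independence assumption is a simplification.

# Illustrative estimate of the chance that a fault escapes a set of signature
# registers, assuming independent aliasing of about 2^-k per k-bit register.

def escape_probability(register_widths):
    """Probability that every affected register aliases (fault goes unnoticed)."""
    p = 1.0
    for k in register_widths:
        p *= 2.0 ** -k
    return p

def detection_probability(register_widths):
    """Probability that at least one signature becomes faulty."""
    return 1.0 - escape_probability(register_widths)

# A fault observed by three 16-bit signature registers:
print(detection_probability([16, 16, 16]))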
27. Optimized Synthesis of Self-Testable Finite State Machines
Eschermann, B. and Wunderlich, H.-J.
Proceedings of the 20th International Symposium on Fault-Tolerant Computing (FTCS-20), Newcastle Upon Tyne, United Kingdom, 26-28 June 1990, pp. 390-397
1990
DOI PDF 
Keywords: VLSI design validation, synthesis for testability, sequential circuits, built-in self-test
Abstract: In this paper a synthesis procedure for self-testable finite state machines is presented. Testability is already considered while transforming the behavioral description of the circuit into a structural description. To this end a novel state encoding algorithm as well as a modified self-test architecture are developed. Experimental results show that this approach leads to a significant reduction of hardware overhead.
BibTeX:
@inproceedings{EscheW1990,
  author = {Eschermann, Bernhard and Wunderlich, Hans-Joachim},
  title = {{Optimized Synthesis of Self-Testable Finite State Machines}},
  booktitle = {Proceedings of the 20th International Symposium on Fault-Tolerant Computing (FTCS-20)},
  year = {1990},
  pages = {390--397},
  keywords = {VLSI design validation, synthesis for testability, sequential circuits, built-in self-test},
  abstract = {In this paper a synthesis procedure for self-testable finite state machines is presented. Testability is already considered while transforming the behavioral description of the circuit into a structural description. To this end a novel state encoding algorithm as well as a modified self-test architecture are developed. Experimental results show that this approach leads to a significant reduction of hardware overhead.},
  doi = {http://dx.doi.org/10.1109/FTCS.1990.89393},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/1990/FTCS_EscheW1990.pdf}
}
26. An Analytical Approach to the Partial Scan Problem
Kunzmann, A. and Wunderlich, H.-J.
Journal of Electronic Testing: Theory and Applications (JETTA)
Vol. 1(2), June 1990, pp. 163-174
1990
DOI URL PDF 
Keywords: design for testability, partial scan path, sequential test generation
Abstract: The scan design is the most widely used technique to ensure the testability of sequential circuits. In this article it is shown that testability is still guaranteed even if only a small part of the flipflops is integrated into a scan path. An algorithm is presented for selecting a minimal number of flipflops which must be directly accessible. The direct accessibility ensures that, for each fault, the necessary test sequence is bounded linearly in the circuit size. Since the underlying problem is NP-complete, efficient heuristics are implemented to compute suboptimal solutions. Moreover, a new algorithm is presented to map a sequential circuit into a minimal combinational one, such that test pattern generation for both circuit representations is equivalent and the fast combinational ATPG methods can be applied. For all benchmark circuits investigated, the approach results in a significant reduction of the hardware overhead, and additionally a complete fault coverage is still obtained. Remarkably, the overall test application time decreases in comparison with a complete scan path, since the width of the shifted patterns is shorter and the number of patterns increases only to a small extent.
BibTeX:
@article{KunzmW1990,
  author = {Kunzmann, Arno and Wunderlich, Hans-Joachim},
  title = {{An Analytical Approach to the Partial Scan Problem}},
  journal = {Journal of Electronic Testing: Theory and Applications (JETTA)},
  publisher = {Springer-Verlag},
  year = {1990},
  volume = {1},
  number = {2},
  pages = {163--174},
  keywords = {design for testability, partial scan path, sequential test generation},
  abstract = {The scan design is the most widely used technique to ensure the testability of sequential circuits. In this article it is shown that testability is still guaranteed, even if only a small part of flipflops are integrated into a scan path. An algorithm is presented for selecting a minimal number of flipflops, which must be directly accessible. The direct accessibility ensures that, for each fault, the necessary test sequence is bounded linearly in the circuit size. Since the underlying problem is NP-complete, efficient heuristics are implemented to compute suboptimal solutions. Moreover, a new algorithm is presented to map a sequential circuit into a minimal combinational one, such that test pattern generation for both circuit representations is equivalent and the fast combinational ATPG methods can be applied. For all benchmark circuits investigated, the approach results in a significant reduction of the hardware overhead, and additionally a complete fault coverage is still obtained. Amazingly the overall test application time decreases in comparison with a complete scan path, since the width of the shifted patterns is shorter, and the number of patterns increase only to a small extent.},
  url = {http://dl.acm.org/citation.cfm?id=83151.84870},
  doi = {http://dx.doi.org/10.1007/BF00137392},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/1990/JETTA_KunzmW1990.pdf}
}
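A frequently used way to approximate such a flip-flop selection is a greedy heuristic that breaks the cycles in the flip-flop dependency graph; the sketch below is only a simplified stand-in for illustration and is not the selection criterion of the paper, which bounds the length of the required test sequences. The dependency graph and flip-flop names are hypothetical.

# Simplified stand-in for partial-scan selection: greedily scan flip-flops
# that break cycles in the flip-flop dependency graph.

def find_cycle(graph):
    """Return one cycle (list of nodes) of a directed graph, or None."""
    color = {v: "white" for v in graph}
    stack = []

    def dfs(v):
        color[v] = "grey"
        stack.append(v)
        for w in graph[v]:
            if color[w] == "grey":                 # back edge closes a cycle
                return stack[stack.index(w):]
            if color[w] == "white":
                cycle = dfs(w)
                if cycle:
                    return cycle
        color[v] = "black"
        stack.pop()
        return None

    for v in graph:
        if color[v] == "white":
            cycle = dfs(v)
            if cycle:
                return cycle
    return None

def select_scan_flipflops(graph):
    """Greedy heuristic: scan the most-connected flip-flop on each cycle."""
    graph = {v: set(ws) for v, ws in graph.items()}
    scan = set()
    while True:
        cycle = find_cycle(graph)
        if cycle is None:
            return scan
        victim = max(cycle, key=lambda v: len(graph[v]))
        scan.add(victim)
        graph[victim] = set()                      # drop its outgoing edges
        for ws in graph.values():
            ws.discard(victim)                     # and its incoming edges

# Hypothetical dependency graph (edges: data flows from one flip-flop to another).
ffs = {"f1": {"f2"}, "f2": {"f3"}, "f3": {"f1", "f4"}, "f4": set()}
print(select_scan_flipflops(ffs))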
25. Multiple Distributions for Biased Random Test Patterns
Wunderlich, H.-J.
IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems (TCAD)
Vol. 9(6), June 1990, pp. 584-593
1990
DOI PDF 
Keywords: Random tests, biased random patterns, multiple weights, low cost test
Abstract: The test of integrated circuits by random patterns is very attractive, since no expensive test pattern generation is necessary and tests can be applied with a self-test technique or externally using linear feedback shift registers. Unfortunately, not all circuits are random testable, because either the fault coverage is too low or the required test length too large. In many cases the random test lengths can be reduced by orders of magnitude using weighted random patterns. However, there are also some circuits for which no single optimal set of weights exists. A set defines a distribution of the random patterns.
In this paper, it is shown that the problem can be solved using several distributions instead of a single one. Furthermore, an efficient procedure for computing the optimized input probabilities is presented. If a sufficient number of distributions is applied, then all combinational circuits can be tested randomly with moderate test lengths. The patterns can be produced by an external chip, and an optimized test schedule for circuits with a scan path can be obtained. Moreover, formulas are described to determine strong bounds on the probability of detecting all faults. Fault simulation with weighted patterns shows a nearly complete coverage of all nonredundant faults.
BibTeX:
@article{Wunde1990,
  author = {Wunderlich, Hans-Joachim},
  title = {{Multiple Distributions for Biased Random Test Patterns}},
  journal = {IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems (TCAD)},
  year = {1990},
  volume = {9},
  number = {6},
  pages = {584--593},
  keywords = {Random tests, biased random patterns, multiple weights, low cost test},
  abstract = {The test of integrated circuits by random patterns is very attractive, since no expensive test pattern generation is necessary and tests can be applied with a self-test technique or externally using linear feedback shift registers. Unfortunately, not all circuits are random testable, because either the fault coverage is too low or the required test length too large. In many cases the random test lengths can be reduced by orders of magnitude using weighted random patterns. However, there are also some circuits for which no single optimal set of weights exists. A set defines a distribution of the random patterns.
In this paper, it is shown that the problem can be solved using several distributions instead of a single one. Furthermore, an efficient procedure for computing the optimized input probabilities is presented. If a sufficient number of distributions is applied, then all combinational circuits can be tested randomly with moderate test lengths. The patterns can be produced by an external chip, and an optimized test schedule for circuits with a scan path can be obtained. Moreover, formulas are described to determine strong bounds on the probability of detecting all faults. Fault simulation with weighted patterns shows a nearly complete coverage of all nonredundant faults.},
  doi = {http://dx.doi.org/10.1109/43.55187},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/1990/TCAD_Wunde1990.pdf}
}
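To illustrate what applying several weight sets means in practice, the sketch below draws patterns from two hand-picked input distributions in turn; the weights are made up for illustration and are not the optimized probabilities computed by the procedure of the paper.

import random

# Two assumed weight sets ("multiple distributions"); each input gets its own
# probability of being driven to 1.
weight_sets = [
    {"a": 0.9, "b": 0.1, "c": 0.5, "d": 0.5},   # aimed at faults needing a=1, b=0
    {"a": 0.2, "b": 0.8, "c": 0.5, "d": 0.5},   # aimed at the complementary faults
]

def weighted_patterns(weights, count, seed=0):
    """Draw 'count' random patterns according to the given input weights."""
    rng = random.Random(seed)
    for _ in range(count):
        yield {inp: int(rng.random() < p) for inp, p in weights.items()}

# Each distribution contributes its own share of the overall test length.
test_set = []
for ws in weight_sets:
    test_set.extend(weighted_patterns(ws, count=4))
print(test_set)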
24. Methoden der Testvorbereitung zum IC-Entwurf
Schulz, M.H. and Wunderlich, H.-J.
Mikroelektronik
Vol. 4(3), May-June 1990, pp. 112-115
1990
DOI PDF 
Abstract: In addition to the actual testing, a test strategy comprises the selection of a suitable fault model, a method for testable structured design, and the generation of the test set. The goal of this test preparation is to increase product quality and to reduce the test costs of integrated circuits.
BibTeX:
@article{WundeS1990,
  author = {Schulz, Martin H. and Wunderlich, Hans-Joachim},
  title = {{Methoden der Testvorbereitung zum IC-Entwurf}},
  journal = {Mikroelektronik},
  publisher = {VDE-Verlag},
  year = {1990},
  volume = {4},
  number = {3},
  pages = {112--115},
  abstract = {Neben dem eigentlichen Testen umfasst eine Teststrategie die Auswahl eines geeigneten Fehlermodells, ein Verfahren für den prüfgerechten strukturierten Entwurf sowie die Testsatzerzeugung. Ziel dieser Prüfvorbereitung ist die Steigerung der Produktqualität sowie die Senkung der Testkosten bei integrierten Schaltungen.},
  doi = {http://dx.doi.org/10.18419/opus-7919},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/1990/ME_SchulW1990.pdf}
}
23. Tools and Devices Supporting the Pseudo-Exhaustive Test
Hellebrand, S. and Wunderlich, H.-J.
Proceedings of the 1st European Design Automation Conference (EDAC'90), Glasgow, United Kingdom, 12-15 March 1990, pp. 13-17
1990
DOI URL PDF 
Keywords: Pseudo-exhaustive test, automatic design for testability
Abstract: In this paper logic cells and algorithms are presented that support the design of pseudo-exhaustively testable circuits. The approach is based on real hardware segmentation instead of path sensitization. The developed cells segment the entire circuit into exhaustively testable parts, and the presented algorithms place these cells with the objective of minimizing the hardware overhead.
The approach is completely compatible with the usual LSSD rules. The analysis of the well-known benchmark circuits shows only a small additional hardware cost.
BibTeX:
@inproceedings{HelleW1990,
  author = {Hellebrand, Sybille and Wunderlich, Hans-Joachim},
  title = {{Tools and Devices Supporting the Pseudo-Exhaustive Test}},
  booktitle = {Proceedings of the 1st European Design Automation Conference (EDAC'90)},
  publisher = {IEEE Computer Society},
  year = {1990},
  pages = {13--17},
  keywords = {Pseudo-exhaustive test, automatic design for testability},
  abstract = {In this paper logical cells and algorithms are presented supporting the design of pseudo-exhaustively testable circuits. The approach is based on real hardware segmentation, instead of path-sensitizing. The developed cells segment the entire circuits into exhaustively testable parts, and the presented algorithms place these cells, under the objective to minimize the hardware overhead.
The approach is completely compatible with the usual LSSD-rules. The analysis of the well-known benchmark circuits shows only little additional hardware cost.},
  url = {http://doi.acm.org/10.1145/949970.949974},
  doi = {http://dx.doi.org/10.1109/EDAC.1990.136612},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/1990/EDAC_HelleW1990.pdf}
}
22. The Effectiveness of Different Test Sets for PLAs
Maxwell, P.C. and Wunderlich, H.-J.
Proceedings of the 1st European Design Automation Conference (EDAC'90), Glasgow, United Kingdom, 12-15 March 1990, pp. 628-632
1990
DOI URL PDF 
Abstract: It has been theoretically demonstrated that the single stuck-at fault model for a PLA does not cover as many faults as the single crosspoint model. What has not been demonstrated is the real relative effectiveness of test sets generated using these models. This paper presents the results of a study in which a number of test sets were applied to fabricated PLAs to determine their effectiveness. The test sets included weighted random patterns, which are of particular interest because PLAs are resistant to random patterns. Details are given of a method to generate weights that takes a PLA's structure into account.
BibTeX:
@inproceedings{MaxweW1990,
  author = {Maxwell, Peter C. and Wunderlich, Hans-Joachim},
  title = {{The Effectiveness of Different Test Sets for PLAs}},
  booktitle = {Proceedings of the 1st European Design Automation Conference (EDAC'90)},
  publisher = {IEEE Computer Society},
  year = {1990},
  pages = {628--632},
  abstract = {It has been theoretically demonstrated that the single stuck-at fault model for a PLA does not cover as many faults as the single crosspoint model. What has not been demonstrated is the real relative effectiveness of test sets generated using these models. This paper presents the results of a study involving presenting a number of test sets to fabricated PLAs to determine their effectiveness. The test sets included weighted random patterns, of particular interest owing to PLAs being random resistant. Details are given of a method to generate weights, taking into account a PLA's structure.},
  url = {http://doi.acm.org/10.1145/949970.950112},
  doi = {http://dx.doi.org/10.1109/EDAC.1990.136722},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/1990/EDAC_MaxweW1990.pdf}
}
21. A Synthesis Approach to Reduce Scan Design Overhead
Eschermann, B. and Wunderlich, H.-J.
Proceedings of the 1st European Design Automation Conference (EDAC'90), Glasgow, United Kingdom, 12-15 March 1990, pp. 671
1990
DOI PDF 
Abstract: Today's logic design strategy is characterized by a division between synthesis, in which a functionally correct implementation is generated, and design for testability, in which the implementation is made testable. In this paper we propose to merge these two steps by utilizing a scan path structure to simplify the combinational logic of finite state machines. This results in a reduction of test overhead, because a part of the scan path is already incorporated during the synthesis process. Alternatively, it can be seen as using a new optimization potential in logic synthesis, since the circuit has to be made testable anyway and so the test hardware is provided "free".
BibTeX:
@inproceedings{WundeE1990,
  author = {Eschermann, Bernhard and Wunderlich, Hans-Joachim},
  title = {{A Synthesis Approach to Reduce Scan Design Overhead}},
  booktitle = {Proceedings of the 1st European Design Automation Conference (EDAC'90)},
  year = {1990},
  pages = {671},
  abstract = {Today's logic design strategy is characterized by a division between synthesis, in which a functionally correct implementation is generated, and design for testability, in which the implementation is made testable. In this paper we propose to merge these two steps by utilizing a scan path structure to simplify the combinational logic of finite state machines. This results in a reduction of test overhead, because a part of the scan path is already incorporated during the synthesis process. Alternatively, it can be seen as using a new optimization potential in logic synthesis, since the circuit has to be made testable anyway and so the test hardware is provided "free".},
  doi = {http://dx.doi.org/10.18419/opus-7927},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/1990/EDAC_EscheW1990.pdf}
}
20. Automatische Synthese selbsttestbarer Moduln für hochkomplexe Schaltungen
Kesel, F. and Wunderlich, H.-J.
Proceedings of the ITG-Fachtagung Mikroelektronik für die Informationstechnik, Stuttgart, Germany, 3-5 October 1989, pp. 63-68
1989
DOI PDF 
Keywords: Self-test, automatic synthesis of self-test hardware, non-uniformly distributed pseudo-random patterns, linear feedback shift registers
Abstract: For testing highly complex digital circuits, self-test techniques based on multifunctional linear feedback shift registers are a natural choice. These registers generate pseudo-random patterns and compact the test responses into a signature. Incorporating the self-test equipment automatically guarantees the correctness of the design. This paper presents a method for the automatic synthesis of multifunctional register circuits that generate uniformly and non-uniformly distributed pseudo-random patterns and compact the test responses by signature analysis. The registers are generated as standard cells and can be placed and routed automatically.
BibTeX:
@inproceedings{KeselW1989a,
  author = {Kesel, F. and Wunderlich, Hans-Joachim},
  title = {{Automatische Synthese selbsttestbarer Moduln für hochkomplexe Schaltungen}},
  booktitle = {Proceedings of the ITG-Fachtagung Mikroelektronik für die Informationstechnik},
  year = {1989},
  pages = {63--68},
  keywords = {Selbsttest, automatische Synthese von Selbsttest-Hardware, ungleichverteilte Pseudozufallsmuster, linear rückgekoppelte Schieberegister},
  abstract = {Für den Test hochkomplexer digitaler Schaltungen bieten sich Selbsttestverfahren an, die auf multifunktionalen linear rückgekoppelten Schieberegistern beruhen. Diese erzeugen Pseudozufallsmuster und komprimieren die Testantworten zu einer Signatur. Durch einen automatischen Einbau der Selbsttestausstattung kann die Korrektheit des Entwurfs gewährleistet werden. Im vorliegenden Beitrag wird ein Verfahren vorgestellt, mit welchem sich multifunktionale Registerschaltungen automatisch synthetisieren lassen, welche gleich- und ungleichverteilte Pseudozufallsmuster erzeugen und die Testantworten durch Signaturanalyse komprimieren. Sie werden als Standardzellen erzeugt und können automatisch plaziert und verdrahtet werden.},
  doi = {http://dx.doi.org/10.18419/opus-7933},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/1989/ITG_KeselW1989.pdf}
}
19. Methoden der Testvorbereitung
Wunderlich, H.-J. and Schulz, M.H.
Proceedings of the ITG-Fachtagung Mikroelektronik für die Informationstechnik, Stuttgart, Germany, 3-5 October 1989, pp. 55-62
1989
DOI PDF 
Abstract: In addition to the actual test execution, a test strategy comprises the selection of a suitable fault model, a method for testable structured design, and the generation of the test set. The goal of this test preparation is to increase product quality and to reduce the cost of test execution.
BibTeX:
@inproceedings{WundeS1989,
  author = {Wunderlich, Hans-Joachim and Schulz, Martin H.},
  title = {{Methoden der Testvorbereitung}},
  booktitle = {Proceedings of the ITG-Fachtagung Mikroelektronik für die Informationstechnik},
  year = {1989},
  pages = {55--62},
  abstract = {Neben der eigentlichen Testdurchführung umfasst eine Teststrategie die Auswahl eines geeigneten Fehlermodells, ein Verfahren für den prüfgerechten strukturierten Entwurf und die Testsatzerzeugung. Ziel dieser Prüfvorbereitung ist die Steigerung der Produktqualität und die Senkung der Kosten für die Testdurchführung.},
  doi = {http://dx.doi.org/10.18419/opus-7932},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/1989/ITG_WundeS1989.pdf}
}
18. The Pseudo-Exhaustive Test of Sequential Circuits
Wunderlich, H.-J. and Hellebrand, S.
Proceedings of the 20th IEEE International Test Conference (ITC'89), Washington, DC, USA, 29-31 August 1989, pp. 19-27
1989
DOI PDF 
Keywords: Pseudo-exhaustive test, sequential circuits, design for testability
Abstract: The concept of a pseudo-exhaustive test for sequential circuits is introduced in a way similar to its use for combinational networks. Instead of test sets, pseudo-exhaustive test sequences of limited length have to be applied, which provides the well-known benefits with respect to fault coverage, self-test capability, and simplicity of test generation.

Design methods are presented for hardware segmentation which ensure that a pseudo-exhaustive test is feasible. Example circuits show that the presented test-strategy requires less additional silicon area than a complete scan path.

BibTeX:
@inproceedings{WundeH1989,
  author = {Wunderlich, Hans-Joachim and Hellebrand, Sybille},
  title = {{The Pseudo-Exhaustive Test of Sequential Circuits}},
  booktitle = {Proceedings of the 20th IEEE International Test Conference (ITC'89)},
  publisher = {IEEE Computer Society},
  year = {1989},
  pages = {19--27},
  keywords = {Pseudo-exhaustive test, sequential circuits, design for testability},
  abstract = {The concept of a pseudo-exhaustive test for sequential circuits is introduced in a similar way as it is used for combinational networks. Instead of test sets one has to apply pseudo-exhaustive test sequences of a limited length, which provides well-known benefits as far as fault-coverage, self-test capability and simplicity of test generation are concerned. 

Design methods are presented for hardware segmentation which ensure that a pseudo-exhaustive test is feasible. Example circuits show that the presented test-strategy requires less additional silicon area than a complete scan path.},
  doi = {http://dx.doi.org/10.1109/TEST.1989.82273},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/1989/ITC_WundeH1989.pdf}
}

17. The Design of Random-Testable Sequential Circuits
Wunderlich, H.-J.
Proceedings of the 19th International Symposium on Fault-Tolerant Computing (FTCS-19), Chicago, Illinois, USA, 21-23 June 1989, pp. 110-117
1989
DOI PDF 
Keywords: Random Test of Sequential Circuits, Built-In Self-Test, Partial Scan Path
Abstract: In general, sequential circuits are considered not to be random-testable, since a required sequence may grow exponentially with the number of flipflops, and it is very unlikely that a certain sequence occurs at random. This problem can be solved by combining two tasks:
1) A small part of the flipflops is made directly accessible, for instance by a partial scan path or by a built-in self-test register.
2) Weighted random patterns are applied to the modified sequential circuit.
The paper describes a method to select a minimal set of flip-flops as mentioned in 1). Since this problem turns out to be NP-complete, suboptimal solutions can be derived using some heuristics. Furthermore, an algorithm is presented to compute the corresponding weights of the patterns, which are time-dependent in some cases. Finally, the entire approach is validated with the help of examples. Only 10% - 40% of the flipflops have to be integrated into a partial scan path or into a BIST register in order to obtain nearly complete fault coverage with weighted random patterns.
BibTeX:
@inproceedings{Wunde1989,
  author = {Wunderlich, Hans-Joachim},
  title = {{The Design of Random-Testable Sequential Circuits}},
  booktitle = {Proceedings of the 19th International Symposium on Fault-Tolerant Computing (FTCS-19)},
  year = {1989},
  pages = {110--117},
  keywords = {Random Test of Sequential Circuits, Built-In Self-Test, Partial Scan Path},
  abstract = {In general, sequential circuits are considered not to be random-testable, since a required sequence may grow exponentially with the number of flipflops, and it is very unlikely that a certain sequence occurs at random. This problem can be solved by combining two tasks:
1) A small part of the flipflops are directly accessible, for instance by a partial scan path or by a built-in self-test register.
2) Weighted random patterns are applied to the modified sequential circuit.
The paper describes a method to select a minimal set of flip-flops as mentioned in 1). Since this problem turns out to be NP-complete, suboptimal solutions can be derived using some heuristics. Furthermore, an algorithm is presented to compute the corresponding weights of the patterns, which are time-dependent in some cases. Finally the entire approach is validated with the help of examples. Only 10% - 40% of the flipflops have to be integrated into a partial scan path or into BIST-register in order to obtain nearly complete fault coverage by weighted random patterns.},
  doi = {http://dx.doi.org/10.1109/FTCS.1989.105552},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/1989/FTCS_Wunde1989.pdf}
}
16. The Synthesis of Self-Test Control Logic
Haberl, O.F. and Wunderlich, H.-J.
Proceedings of CompEuro '89, 'VLSI and Computer Peripherals. VLSI and Microelectronic Applications in Intelligent Peripherals and their Interconnection Networks', Hamburg, Germany, 8-12 May 1989, pp. 5/134-5/136
1989
DOI PDF 
Abstract: In recent years, many built-in self-test techniques have been proposed based on feedback shift registers for pattern generation and signature analysis. In general, however, these test registers cannot test several modules of the chip concurrently, and they have to be controlled by external automatic test equipment. This paper proposes a method to integrate the additional test control logic into the chip. Based on a register-transfer description of the circuit, the test control is derived and a corresponding finite automaton is synthesized. A hardware implementation is proposed, resulting in circuits where the entire self-test consists only of activating the test mode, clocking, and evaluating the overall signature.
BibTeX:
@inproceedings{HaberW1989,
  author = {Haberl, Oliver F. and Wunderlich, Hans-Joachim},
  title = {{The Synthesis of Self-Test Control Logic}},
  booktitle = {Proceedings of the CompEuro '89., 'VLSI and Computer Peripherals. VLSI and Microelectronic Applications in Intelligent Peripherals and their Interconnection Networks'},
  year = {1989},
  pages = {5/134--5/136},
  abstract = {In recent years, many built-in self-test techniques have been proposed based on feedback shift-registers for pattern generation and signature analysis. But in general, these test-registers cannot test several modules of the chip concurrently, and they have to be controlled by an external automatic test equipment. The presented paper proposes a method to integrate the additional test control logic into the chip. Based on a register transfer description of the circuit, the test control is derived and an according finite automaton is synthesized. A hardware implementation is proposed, resulting in circuits, where the entire self-test only consists in activating the test mode, clocking and evaluating the overall signature.},
  doi = {http://dx.doi.org/10.1109/CMPEUR.1989.93499},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/1989/VLSI_HaberW1989.pdf}
}
15. Parametrisierte Speicherzellen zur Unterstützung des Selbsttests mit optimierten und konventionellen Zufallsmustern
Kesel, F. and Wunderlich, H.-J.
GMD Berichte, 4. E.I.S.-Workshop, Bonn, Germany, 21-22 February 1989, pp. 75-84
1989
DOI PDF 
Abstract: Many self-test techniques for highly integrated circuits are based on generating random patterns with feedback shift registers. However, if the patterns are generated with a uniform distribution, an uneconomically large number of random patterns is often required to reach sufficient fault coverage. With non-uniformly distributed random patterns the test length can be reduced decisively; a corresponding self-test concept was proposed as GURT (Generator of Unequiprobable Random Tests) in (Wund87b). This paper presents basic cells for the synthesis of registers according to the GURT principle and discusses the problems in designing a corresponding synthesis program. Using an example, the self-test concepts based on the GURT and the BILBO principle are compared.
BibTeX:
@inproceedings{KeselW1989,
  author = {Kesel, Frank and Wunderlich, Hans-Joachim},
  title = {{Parametrisierte Speicherzellen zur Unterstützung des Selbsttests mit optimierten und konventionellen Zufallsmustern}},
  booktitle = {GMD Berichte, 4. E.I.S.-Workshop},
  year = {1989},
  pages = {75--84},
  abstract = {Viele Selbsttestverfahren für hochintegrierte Schaltungen beruhen auf der Erzeugung von Zufallsmustern mit rückgekoppelten Schieberegistern. Oft wird jedoch für eine ausreichende Fehlererfassung eine unwirtschaftlich grosse Menge von Zufallsmustern benötigt, falls diese gleichverteilt erzeugt werden. Mit ungleichverteilten Zufallsmustern kann die Testlänge entscheidend reduziert werden, ein entsprechendes Selbsttestkonzept wurde als GURT (Generator of Unequiprobable Random Tests) in (Wund87b) vorgeschlagen. Im vorliegenden Beitrag werden Grundzellen zur Synthese von Registern nach dem GURT-Prinzip vorgestellt, die Probleme beim Entwurf eines entsprechenden Syntheseprogrammes diskutiert. Anhand eines Beispieles werden die Selbsttestkonzepte nach dem GURT- und nach dem BILBO-Prinzip verglichen.},
  doi = {http://dx.doi.org/10.18419/opus-7936},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/1989/GMD_KeselW1989.pdf}
}
14. Automatisierung des Entwurfs vollständig testbarer Schaltungen
Hellebrand, S. and Wunderlich, H.-J.
Proceedings of the 18. GI Jahrestagung II
Vol. 188, Hamburg, Germany, 17-19 October 1988, pp. 145-159
1988
DOI PDF 
Abstract: The costs of test preparation, test generation, and test execution grow disproportionately with the complexity of application-specific circuits, so the test strategy should be fixed and taken into account in a very early phase of circuit design. This article presents basic logic cells and algorithms supporting the pseudo-exhaustive test. This test strategy has the advantage that the extremely compute-intensive test pattern generation is no longer needed, while complete fault coverage at the gate level is guaranteed. The presented basic cells partition the overall circuit into exhaustively testable parts, and the presented algorithms place these segmentation cells such that the additional silicon area remains small. For this purpose, variants of so-called hill-climbing and simulated-annealing procedures have been developed.
BibTeX:
@inproceedings{HelleW1988,
  author = {Hellebrand, Sybille and Wunderlich, Hans-Joachim},
  title = {{Automatisierung des Entwurfs vollständig testbarer Schaltungen}},
  booktitle = {Proceedings of the 18. GI Jahrestagung II},
  publisher = {Springer-Verlag},
  year = {1988},
  volume = {188},
  pages = {145--159},
  abstract = {Die Kosten für die Testvorbereitung, Testerzeugung und Testdurchführung wachsen überproportional mit der Komplexität anwendungsspezifischer Schaltungen, und die Teststrategie sollte daher bereits in einer sehr frühen Phase des Schaltungsentwurfs festgelegt und berücksichtigt werden. In diesem Artikel werden logische Grundzellen und Algorithmen zur Unterstützung des pseudo-erschöpfenden Tests vorgestellt. Diese Teststrategie hat den Vorteil, daß die äußerst rechenzeitaufwendige Testmustererzeugung entfällt und zugleich eine vollständige Fehlererfassung auf Gatterebene garantiert ist. Die vorgestellten Grundzellen dienen der Zerlegung der Gesamtschaltung in erschöpfend testbare Teile, die präsentierten Algorithmen sollen diese Segmentierungszellen so plazieren, daß der Mehraufwand an Silizium gering bleibt. Hierzu wurden Varianten sogenannter "Hill-Climbing" und "Simulated-Annealing"-Verfahren entwickelt.},
  doi = {http://dx.doi.org/10.1007/978-3-642-74135-7_10},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/1988/GIJahrestagung_HelleW1988.pdf}
}
13. Multiple Distributions for Biased Random Test Patterns
Wunderlich, H.-J.
Proceedings of the 19th IEEE International Test Conference (ITC'88): New Frontiers in Testing, Washington, DC, USA, 12-14 September 1988, pp. 236-244
1988
DOI URL PDF 
Keywords: Random tests, biased random patterns, multiple distributions, low cost test
Abstract: The test of integrated circuits by random patterns is very attractive, since no expensive test pattern generation is necessary and the test application can be done by a self-test technique or externally using linear feedback shift registers. Unfortunately, not all circuits are random-testable, since the fault coverage would be too low or the necessary test length would be too large. In many cases the random test lengths can be reduced by orders of magnitude using weighted random patterns. But there are also some circuits for which no single optimal set of weights exists. In this paper it is shown that the problem is solved using several distributions instead of a single one. Furthermore, an efficient procedure is presented for computing the optimized input probabilities. This way, all combinational circuits can be made random-testable. Fault simulation with weighted patterns shows a complete coverage of all non-redundant faults. The patterns can be successively produced by an external chip, and an optimized test scheme for circuits in a scan design can be established. In addition, formulas are derived that determine sharp bounds on the probability that all faults are detected.
BibTeX:
@inproceedings{Wunde1988a,
  author = {Wunderlich, Hans-Joachim},
  title = {{Multiple Distributions for Biased Random Test Patterns}},
  booktitle = {Proceedings of the 19th IEEE International Test Conference (ITC'88). New Frontiers in Testing, International},
  publisher = {IEEE Computer Society},
  year = {1988},
  pages = {236--244},
  keywords = {Random tests, biased random patterns, multiple distributions, low cost test},
  abstract = {The test of integrated circuits by random patterns is very attractive, since no expensive test pattern generation is necessary and the test application can be done by a self-test technique or externally using linear feedback shift-registers. Unfortunately not all circuits are random-testable, since the fault coverage would be too low or the necessary test length would be too large. In many cases the random test lengths can be reduced by orders of magnitude using weighted random patterns. But there are also some circuits where no single optimal weight exists. In this paper it is shown that the problem is solved using several distributions instead of a single one. Furthermore an efficient procedure is presented computing the optimized input probabilities. This way all combinational circuits can be made random-testable. Fault simulation with weighted patterns shows a complete coverage of all non-redundant faults. The patterns can be successively produced by an external chip and an optimized test scheme for circuits in a scan design can be established. As a result of its own formulas are derived determining sharp bounds of the probability that all faults are detected.},
  url = {http://dl.acm.org/citation.cfm?id=1896122.1896171},
  doi = {http://dx.doi.org/10.1109/TEST.1988.207808},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/1988/ITC_Wunde1988.pdf}
}
12. Generating Pattern Sequences for the Pseudo-Exhaustive Test of MOS-Circuits
Wunderlich, H.-J. and Hellebrand, S.
Proceedings of the 18th International Symposium on Fault-Tolerant Computing (FTCS-18), Tokyo, Japan, 27-30 June 1988, pp. 36-41
1988
DOI PDF 
Keywords: Pseudo-exhaustive test, built-in test, stuck-open faults, LFSR, finite fields
Abstract: In order to ensure a high product quality some authors propose pseudo-exhaustive or verification testing. This is applicable if each primary output of the combinational circuit depends only on a small set of primary inputs, for which all possible patterns can be enumerated completely. But in CMOS circuits even a single stuck-open fault may fail to be detected this way, and the already proposed additional test of each input transition is not sufficient either.
In this paper a method based on linear feedback shift registers over finite fields is presented to generate for a natural number n a pattern sequence with minimal length detecting each m-multiple stuck-open fault for m <= n. A hardware architecture is discussed generating this sequence, and for n = 1 a built-in self-test approach is presented detecting all combinations of multiple combinational and single stuck-open faults.
BibTeX:
@inproceedings{WundeH1988,
  author = {Wunderlich, Hans-Joachim and Hellebrand, Sybille},
  title = {{Generating Pattern Sequences for the Pseudo-Exhaustive Test of MOS-Circuits}},
  booktitle = {Proceedings of the 18th International Symposium on Fault-Tolerant Computing (FTCS-18)},
  year = {1988},
  pages = {36--41},
  keywords = {Pseudo-exhaustive test, built-in test, stuck-open faults, LFSR, finite fields},
  abstract = {In order to ensure a high product quality some authors propose pseudo-exhaustive or verification testing. This is applicable if each primary output of the combinational circuit only depends on a small set of primary inputs, where all possible patterns can be enumerated completely. But in CMOS-circuits even a single stuck-open fault may fail to be detected this way, and the already proposed additional test of each input transition is not sufficient either.
In this paper a method based on linear feedback shift registers over finite fields is presented to generate for a natural number n a pattern sequence with minimal length detecting each m-multiple stuck-open fault for m <= n. A hardware architecture is discussed generating this sequence, and for n = 1 a built-in self-test approach is presented detecting all combinations of multiple combinational and single stuck-open faults.},
  doi = {http://dx.doi.org/10.1109/FTCS.1988.5294},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/1988/FTCS_WundeH1988.pdf}
}
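For readers unfamiliar with LFSR-based pattern generation, the fragment below shows a plain binary Fibonacci LFSR; the paper itself works with shift registers over larger finite fields in order to control the transitions between consecutive patterns, which this simple binary sketch does not capture. Width, taps, and seed are illustrative assumptions.

# Minimal binary LFSR sketch (Fibonacci style) for pseudo-random pattern
# generation; width, feedback taps, and seed are illustrative only.

def lfsr_sequence(width, taps, seed=1):
    """Yield successive LFSR states; 'taps' are bit positions XORed as feedback."""
    state = seed
    while True:
        yield state
        feedback = 0
        for t in taps:
            feedback ^= (state >> t) & 1
        state = ((state << 1) | feedback) & ((1 << width) - 1)

# With these feedback taps (a primitive feedback polynomial) the 4-bit register
# cycles through all 15 nonzero states before repeating.
gen = lfsr_sequence(width=4, taps=(3, 0))
print([next(gen) for _ in range(15)])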
11. Weighted Random Patterns with Multiple Distributions
Wunderlich, H.-J.
Proceedings of the 11th International Conference on Fault Tolerant Systems and Diagnostics
Vol. 5(2), Suhl, German Democratic Republic, 6-9 June 1988, pp. 88-93
1988
DOI PDF 
Abstract: It is well known that random test lengths can be reduced by orders of magnitude using biased random patterns. But there are also some circuits resistant to this optimization. In this paper it is shown that this problem can be solved using several distributions instead of a single one. Firstly, we compute bounds on the error caused by the assumption that fault detection consists of completely independent events. Secondly, we prove a sharp estimate of the error caused by assuming the random property instead of the pseudo-random property of shift register sequences. Finally, a heuristic is presented to compute an optimal number of random pattern sets, where each set has its specific distribution and its specific size.
BibTeX:
@inproceedings{Wunde1988,
  author = {Wunderlich, Hans-Joachim},
  title = {{Weighted Random Patterns with Multiple Distributions}},
  booktitle = {Proceedings of the 11th International Conference on Fault Tolerant Systems and Diagnostics},
  year = {1988},
  volume = {5},
  number = {2},
  pages = {88--93},
  abstract = {It is well known that random test lengths can be reduced by orders of magnitude using biased random patterns. But there are also some circuits resistant to optimising. In this paper it is shown that this problem can be solved using several distributions instead of a single one. Firstly we compute bounds of the error caused by the assumption that fault detection consists of completely independent events. Secondly we prove a sharp estimation of the error caused by assuming the random property instead of the pseudo-random property of shift register sequences. Finally a heuristic is presented in order to compute an optimal number of random pattern sets, where each set has its specific distribution and its specific size.},
  doi = {http://dx.doi.org/10.18419/opus-7941},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/1988/FTSD_Wunde1988.pdf}
}
10. Output-maximal control policies for cascaded production-inventory systems with control and state constraints
Warschat, J. and Wunderlich, H.-J.
International Journal of Systems Science
Vol. 19(6), January 1988, pp. 1011-1020
1988
DOI PDF 
Keywords: Optimal control, Planning, Optimal planning, Production, Finite horizon, Inventory, Bang bang control, Subsystem, Maximization, Minimum time, Cascade connection
Abstract: Optimal control policies are derived for cascaded production-inventory systems. As objectives, output maximization and the minimum time to produce a fixed output are considered. An example consisting of three subsystems is detailed to illustrate the proposed theory.
BibTeX:
@article{WarscW1988,
  author = {Warschat, J. and Wunderlich, Hans-Joachim},
  title = {{Output-maximal control policies for cascaded production-inventory systems with control and state constraints}},
  journal = {International Journal of Systems Science},
  publisher = {Taylor & Francis},
  year = {1988},
  volume = {19},
  number = {6},
  pages = {1011--1020},
  keywords = {Optimal control, Planning, Optimal planning, Production, Finite horizon, Inventory, Bang bang control, Subsystem, Maximization, Minimum time, Cascade connection},
  abstract = {Optimal control policies are derived for cascaded production-inventory systems. As objectives, output maximization and the minimum time to produce a fixed output are considered. An example consisting of three subsystems is detailed to illustrate the proposed theory.},
  doi = {http://dx.doi.org/10.1080/00207728808547182},
  file = {http://www.iti.uni-stuttgart.de/fileadmin/rami/files/publications/1988/IJSS_WarscW1988.pdf}
}
9. Integrated Tools for Automatic Design for Testability
Schmid, D., Wunderlich, H.-J., Feldbusch, F., Hellebrand, S., Holzinger, J. and Kunzmann, A.
Proceedings of the IFIP WG 10.2 Workshop on Tool Integration and Design Environments, Paderborn, Germany, 26-27 November 1987, pp. 233-258
1987
DOI