grains_qheat.cpp
1 /* This file is part of Cloudy and is copyright (C)1978-2008 by Gary J. Ferland and
2  * others. For conditions of distribution and use see copyright notice in license.txt */
3 /*GrainMakeDiffuse main routine for generating the grain diffuse emission, called by RT_diffuse */
4 #include "cddefines.h"
5 #include "physconst.h"
6 #include "rfield.h"
7 #include "phycon.h"
8 #include "dense.h"
9 #include "hmi.h"
10 #include "thermal.h"
11 #include "trace.h"
12 #include "thirdparty.h"
13 #include "iterations.h"
14 #include "grainvar.h"
15 #include "grains.h"
16 
17 #define NO_ATOMS(ND) (gv.bin[ND]->AvVol*gv.bin[ND]->dustp[0]/ATOMIC_MASS_UNIT/gv.bin[ND]->atomWeight)
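/* illustrative aside (not part of the original source): NO_ATOMS reads as the number of atoms
 * in one average grain, i.e. grain volume times bulk density divided by the mean atomic mass.
 * As a rough order-of-magnitude check, a 0.1 micron silicate sphere has
 * V = 4*PI/3*(1.e-5 cm)^3 = 4.19e-15 cm^3, hence a mass of 3.3*4.19e-15 = 1.38e-14 g,
 * i.e. 1.38e-14/1.66e-24 = 8.3e9 amu, or about 8.3e9/24.6 = 3.4e8 atoms per grain */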
18 
19 /* NB NB -- the sequence below has been carefully chosen and should NEVER be
20  * altered unless you really know what you are doing !! */
21 /* >>chng 03 jan 16, introduced QH_THIGH_FAIL and started using splint_safe and spldrv_safe
22  * throughout the code; this solves the parispn_a031_sil.in bug, PvH */
23 /* >>chng 03 jan 16, rescheduled QH_STEP_FAIL as non-fatal; it can be triggered at very low temps, PvH */
24 typedef enum {
25  /* the following are OK */
26  /* 0 1 2 3 */
28  /* the following are mild errors we already recovered from */
29  /* 4 5 6 7 */
31  /* any of these errors will prompt qheat to do another iteration */
32  /* 8 9 10 11 */
34  /* any error larger than QH_NO_REBIN will cause GetProbDistr_LowLimit to return
35  * before even attempting to rebin the results; we may be able to recover though... */
36  /* 12 13 14 15 */
38  /* any case larger than QH_FATAL is truly pathological; there is no hope of recovery */
39  /* 16 17 18 19 */
41 } QH_Code;
42 
43 /*================================================================================*/
44 /* definitions relating to quantum heating routines */
45 
46 /* this is the minimum number of bins for quantum heating calculation to be valid */
47 static const long NQMIN = 10L;
48 
49 /* this is the lowest value for dPdlnT that should be included in the modeling */
50 static const double PROB_CUTOFF_LO = 1.e-15;
51 static const double PROB_CUTOFF_HI = 1.e-20;
52 static const double SAFETY = 1.e+8;
53 
54 /* during the first NSTARTUP steps, use special algorithm to calculate stepsize */
55 static const long NSTARTUP = 5L;
56 
57 /* if the average number of multiple events is above this number
58  * don't try full quantum heating treatment. */
59 static const double MAX_EVENTS = 150.;
60 
61 /* maximum number of tries for quantum heating routine */
62 /* >>chng 02 jan 30, changed LOOP_MAX from 10 -> 20, PvH */
63 static const long LOOP_MAX = 20L;
64 
65 /* if all else fails, divide temp estimate by this factor */
66 static const double DEF_FAC = 3.;
67 
68 /* total probability for all bins should not deviate more than this from 1. */
69 static const double PROB_TOL = 0.02;
70 
71 /* after NQTEST steps, make an estimate if prob distr will converge in NQGRID steps */
72 /* >>chng 02 jan 30, change NQTEST from 1000 -> 500, PvH */
73 static const long NQTEST = 500L;
74 
75 /* if the ratio fwhm/Umax is lower than this number
76  * don't try full quantum heating treatment. */
77 static const double FWHM_RATIO = 0.1;
78 /* if the ratio fwhm/Umax is lower than this number
79  * don't even try analytic approximation, use delta function instead */
80 static const double FWHM_RATIO2 = 0.007;
81 
82 /* maximum number of steps for integrating quantum heating probability distribution */
83 static const long MAX_LOOP = 2*NQGRID;
84 
85 /* this is the tolerance used while integrating the temperature distribution of the grains */
86 static const double QHEAT_TOL = 5.e-3;
87 
88 /* maximum number of tries before we declare that probability distribution simply won't fit */
89 static const long WIDE_FAIL_MAX = 3;
90 
91 /* multipliers for PROB_TOL used in GetProbDistr_HighLimit */
92 static const double STRICT = 1.;
93 static const double RELAX = 15.;
94 
95 /* when rebinning quantum heating results, make ln(qtemp[i]/qtemp[i-1]) == QT_RATIO */
96 /* this constant determines the accuracy with which the Wien tail of the grain emission
97  * is calculated; if x = h*nu/k*T_gr and d = QT_RATIO-1., then the relative accuracy of
98  * the flux in the Wien tail is Acc = fabs(x*d^2/12 - x^2*d^2/24). A typical value of x
99  * would be x = 15, so QT_RATIO = 1.03 should converge the spectrum to better than 1% */
100 static const double QT_RATIO = 1.03;
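/* illustrative aside (not part of the original source): with the numbers quoted above,
 * x = 15 and d = QT_RATIO-1 = 0.03, the estimate gives
 * Acc = fabs(15*0.03^2/12 - 15^2*0.03^2/24) = fabs(1.1e-3 - 8.4e-3) ~ 7.3e-3,
 * i.e. the Wien tail is indeed converged to better than 1% */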
101 
102 
103 /*================================================================================*/
104 /* global variables */
105 
106 /* these data define the enthalpy function for silicates
107  * derived from:
108  * >>refer grain physics Guhathakurta & Draine, 1989, ApJ, 345, 230
109  * coefficients converted to rydberg energy units, per unit atom
110  * assuming a density of 3.3 g/cm^3 and pure MgSiFeO4.
111  * this is not right, but the result is correct because number
112  * of atoms will be calculated using the same assumption. */
113 
114 /* this is the specific density of silicate in g/cm^3 */
115 static const double DEN_SIL = 3.30;
116 
117 /* these are the mean molecular weights per atom for MgSiFeO4 and SiO2 in amu */
118 static const double MW_SIL = 24.6051;
119 /*#define MW_SIO2 20.0283*/
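/* illustrative aside (not part of the original source): MW_SIL follows from the assumed
 * MgSiFeO4 stoichiometry, (24.305 + 28.086 + 55.845 + 4*15.999)/7 = 172.23/7 = 24.60 amu
 * per atom, in agreement with the value quoted above */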
120 
121 static const double tlim[5]={0.,50.,150.,500.,DBL_MAX};
122 static const double ppower[4]={2.00,1.30,0.68,0.00};
123 static const double cval[4]={
128 
129 
130 /* initialize phiTilde */
131 STATIC void qheat_init(long,/*@out@*/double[],/*@out@*/double*);
132 /* worker routine, this implements the algorithm of Guhathakurta & Draine */
133 STATIC void GetProbDistr_LowLimit(long,double,double,double,/*@in@*/double[],/*@in@*/double[],
134  /*@out@*/double[],/*@out@*/double[],/*@out@*/double[],
135  /*@out@*/long*,/*@out@*/double*,long*,/*@out@*/QH_Code*);
136 /* try two consecutive integration steps using stepsize "step/2." (yielding p[k]),
137  * and also one double integration step using stepsize "step" (yielding p2k). */
138 STATIC double TryDoubleStep(double[],double[],double[],double[],double[],double[],
139  double[],double,/*@out@*/double*,long,long,/*@out@*/bool*);
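/* illustrative aside (not part of the original source): a minimal sketch of the step-doubling
 * idea behind TryDoubleStep -- advance twice with stepsize step/2 and once with stepsize step,
 * and use the difference as the relative error estimate that decides whether the step is
 * accepted; advance() below is a hypothetical single-step update, not a routine in this file */
# if 0
STATIC double advance(double y, double h);

STATIC double step_with_error(double y, double h, /*@out@*/ double *RelError)
{
	/* two half steps give the more accurate result */
	double y_half = advance(advance(y,0.5*h),0.5*h);
	/* one full step gives the comparison value */
	double y_full = advance(y,h);
	/* relative difference serves as the error estimate used to accept or shrink the step */
	*RelError = fabs(y_half-y_full)/MAX2(fabs(y_half),DBL_EPSILON);
	return y_half;
}
# endif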
140 /* calculate logarithmic integral from (x1,y1) to (x2,y2) */
141 STATIC double log_integral(double,double,double,double);
142 /* scan the probability distribution for valid range */
143 STATIC void ScanProbDistr(double[],double[],long,double,long,/*@out@*/long*,/*@out@*/long*,
144  /*@out@*/long*,long*,QH_Code*);
145 /* rebin the quantum heating results to speed up RT_diffuse */
146 STATIC long RebinQHeatResults(long,long,long,double[],double[],double[],double[],double[],
147  double[],double[],QH_Code*);
148 /* calculate approximate probability distribution in high intensity limit */
149 STATIC void GetProbDistr_HighLimit(long,double,double,double,/*@out@*/double[],/*@out@*/double[],
150  /*@out@*/double[],/*@out@*/double*,/*@out@*/long*,
151  /*@out@*/double*,/*@out@*/QH_Code*);
152 /* derivative of the enthalpy function dU/dT */
153 STATIC double uderiv(double,long);
154 /* enthalpy function */
155 STATIC double ufunct(double,long,/*@out@*/bool*);
156 /* inverse enthalpy function */
157 STATIC double inv_ufunct(double,long,/*@out@*/bool*);
158 /* helper function for calculating enthalpy, uses Debye theory */
159 STATIC double DebyeDeriv(double,long);
160 
161 /* >>chng 01 oct 29, introduced gv.bin[nd]->cnv_H_pGR, cnv_GR_pH, etc. PvH */
162 
163 
164 /* main routine for generating the grain diffuse emission, called by RT_diffuse */
165 void GrainMakeDiffuse(void)
166 {
167  long i,
168  j,
169  nd;
170  double bfac,
171  f,
172  factor,
173  flux,
174  x;
175 
176  /* >>chng 01 sep 11, replace array allocation on stack with
177  * MALLOC to avoid bug in gcc 3.0 on Sparc platforms, PvH */
178  double *qtemp/*[NQGRID]*/, *qprob/*[NQGRID]*/;
179 
180 # ifndef NDEBUG
181  bool lgNoTdustFailures;
182  double BolFlux,
183  Comparison1,
184  Comparison2;
185 # endif
186 
187  /* this assures 6-digit precision in the evaluation of the exponential below */
188  const double LIM1 = pow(2.e-6,1./2.);
189  const double LIM2 = pow(6.e-6,1./3.);
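 /* illustrative aside (not part of the original source): these limits follow from truncating
  * the series exp(f) = 1 + f + f^2/2 + f^3/6 + ...; in the expansions below, keeping only the
  * linear term leaves a first neglected term f^2/2 < 1.e-6 for f < sqrt(2.e-6) = LIM1, and
  * keeping (1 + 0.5*f)*f leaves f^3/6 < 1.e-6 for f < (6.e-6)^(1/3) = LIM2, so the exponential
  * (which is close to 1 in this regime) is evaluated to about 6 significant digits */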
190 
191  DEBUG_ENTRY( "GrainMakeDiffuse()" );
192 
193  factor = 2.*PI4*POW2(FR1RYD/SPEEDLIGHT)*FR1RYD;
194  /* >>chng 96 apr 26 upper limit chng from 15 to 75 Peter van Hoof */
195  /* >>chng 00 apr 10 use constants appropriate for double precision, by PvH */
196  x = log(0.999*DBL_MAX);
197 
198  /* save grain emission per unit volume */
199  for( i=0; i < rfield.nflux; i++ )
200  {
201  /* used in RT_diffuse to derive total emission */
202  gv.GrainEmission[i] = 0.;
203  gv.SilicateEmission[i] = 0.;
204  gv.GraphiteEmission[i] = 0.;
205  }
206 
207  qtemp = (double*)MALLOC((size_t)(NQGRID*sizeof(double)));
208  qprob = (double*)MALLOC((size_t)(NQGRID*sizeof(double)));
209 
210  for( nd=0; nd < gv.nBin; nd++ )
211  {
212  strg_type scase;
213  bool lgLocalQHeat;
214  long qnbin=-200;
215  realnum *grn, threshold;
216  double xx;
217 
218  /* save emission from this species */
219  scase = gv.which_strg[gv.bin[nd]->matType];
220  switch( scase )
221  {
222  case STRG_SIL:
223  grn = gv.SilicateEmission;
224  break;
225  case STRG_CAR:
226  grn = gv.GraphiteEmission;
227  break;
228  default:
229  fprintf( ioQQQ, " GrainMakeDiffuse called with unknown storage class: %d\n", scase );
230  cdEXIT(EXIT_FAILURE);
231  }
232 
233  /* this local copy is necessary to keep lint happy */
234  lgLocalQHeat = gv.bin[nd]->lgQHeat;
235  /* >>chng 04 nov 09, do not evaluate quantum heating if abundance is negligible, PvH
236  * this prevents PAH's deep inside molecular regions from failing if GrnVryDepth is used */
237  /* >>chng 04 dec 31, introduced separate thresholds near I-front and in molecular region, PvH */
238  threshold = ( dense.xIonDense[ipHYDROGEN][0]+dense.xIonDense[ipHYDROGEN][1] > hmi.H2_total ) ?
240 
241  if( lgLocalQHeat && gv.bin[nd]->dstAbund >= threshold )
242  {
243  qheat(qtemp,qprob,&qnbin,nd);
244 
245  if( gv.bin[nd]->lgUseQHeat )
246  {
247  ASSERT( qnbin > 0 );
248  }
249  }
250  else
251  {
252  /* >> chng 04 dec 31, repaired bug lgUseQHeat not set when abundance below threshold, PvH */
253  gv.bin[nd]->lgUseQHeat = false;
254  }
255 
256  flux = 1.;
257  /* flux can only become zero in the Wien tail */
258  /* >>chng 04 jan 25, replaced flux > 0. with (realnum)flux > 0.f, PvH */
259  for( i=0; i < rfield.nflux && (realnum)flux > 0.f; i++ )
260  {
261  flux = 0.;
262  if( lgLocalQHeat && gv.bin[nd]->lgUseQHeat )
263  {
264  xx = 1.;
265  /* we start at high temperature end and work our way down
266  * until contribution becomes negligible */
267  for( j=qnbin-1; j >= 0 && xx > flux*DBL_EPSILON; j-- )
268  {
269  f = TE1RYD/qtemp[j]*rfield.anu[i];
270  if( f < x )
271  {
272  /* want the number exp(hnu/kT) - 1, two expansions */
273  /* >>chng 04 jan 25, changed 1.e-5 -> LIM1, LIM2, PvH */
274  if( f > LIM2 )
275  bfac = exp(f) - 1.;
276  else if( f > LIM1 )
277  bfac = (1. + 0.5*f)*f;
278  else
279  bfac = f;
280  xx = qprob[j]*gv.bin[nd]->dstab1[i]*rfield.anu2[i]*
281  rfield.widflx[i]/bfac;
282  flux += xx;
283  }
284  else
285  {
286  xx = 0.;
287  }
288  }
289  }
290  else
291  {
292  f = TE1RYD/gv.bin[nd]->tedust*rfield.anu[i];
293  if( f < x )
294  {
295  /* >>chng 04 jan 25, changed 1.e-5 -> LIM1, LIM2, PvH */
296  if( f > LIM2 )
297  bfac = exp(f) - 1.;
298  else if( f > LIM1 )
299  bfac = (1. + 0.5*f)*f;
300  else
301  bfac = f;
302  flux = gv.bin[nd]->dstab1[i]*rfield.anu2[i]*rfield.widflx[i]/bfac;
303  }
304  }
305 
306  /* do multiplications outside loop, PvH */
307  flux *= factor*gv.bin[nd]->cnv_H_pCM3;
308 
309  /* remember the local emission; these arrays are zeroed out for each zone
310  * above, and are now incremented, so this is the unit emission from this zone */
311  gv.GrainEmission[i] += (realnum)flux;
312  /* unit emission for each different grain type */
313  grn[i] += (realnum)flux;
314  }
315  }
316 
317  free( qprob );
318  free( qtemp );
319 
320 # ifndef NDEBUG
321  /*********************************************************************************
322  *
323  * Following are three checks on energy and charge conservation by the grain code.
324  * Their primary function is as an internal consistency check, so that coding
325  * errors get caught as early as possible. This is why the program terminates as
326  * soon as any one of the checks fails.
327  *
328  * NB NB - despite appearances, these checks do NOT guarantee overall energy
329  * conservation in the Cloudy model to the asserted tolerance, see note 1B !
330  *
331  * Note 1: there are two sources for energy imbalance in the grain code (see A & B).
332  * A: Interpolation in dstems. The code calculates how much energy the grains
333  * emit in thermal radiation (gv.bin[nd]->GrainHeat), and converts that into
334  * an (average) grain temperature by reverse interpolation in dstems. If
335  * quantum heating is not used, that temperature is used directly to generate
336  * the local diffuse emission. Hence the finite resolution of the dstems grid
337  * can lead to small errors in flux. This is tested in Check 1. The maximum
338  * error of interpolating in dstems scales with NDEMS^-3. The same problem
339  * can also occur when quantum heating is used, however, the fact that many
340  * bins are used will probably lead to a cancellation effect of the errors.
341  * B: RT_OTS_Update gets called AFTER grain() has completed, so the grain heating
342  * was calculated using a different version of the OTS fields than the one
343  * that gets fed into the RT routines (where the actual attenuation of the
344  * radiation fields by the grain opacity is done). This can lead to an energy
345  * imbalance, depending on how accurate the convergence of the OTS fields is.
346  * This is outside the control of the grain code and is therefore NOT checked.
347  * Rather, the grain code remembers the contribution from the old OTS fields
348  * (through gv.bin[nd]->BolFlux) and uses that in Check 3. In most models the
349  * difference will be well below 0.1%, but in AGN type models where OTS continua
350  * are important, the energy imbalance can be of the order of 0.5% of the grain
351  * heating (status nov 2001). On 04 jan 25 the initialization of phiTilde has
352  * been moved to qheat, implying that phiTilde now uses the updated version of
353  * the OTS fields. The total amount of radiated energy however is still based
354  * on gv.bin[nd]->GrainHeat which uses the old version of the OTS fields.
355  * C: Energy conservation for collisional processes is guaranteed by adding in
356  * (very small) correction terms. These corrections are needed to cover up
357  * small imperfections in the theory, and cannot be avoided without making the
358  * already very complex theory even more complex.
359  * D: Photo-electric heating and collisional cooling can have an important effect
360  * on the total heating balance of the gas. Both processes depend strongly on
361  * the grain charge, so assuring proper charge balance is important as well.
362  * This is tested in Check 2.
363  *
364  * Note 2: for quantum heating it is important to resolve the Maxwell distribution
365  * of the electrons and ions far enough into the tail that the total amount of
366  * energy contained in the remaining tail is negligible. If this is not the case,
367  * the assert at the beginning of the qheat() routine will fail. This is because
368  * the code filling up the phiTilde array in GrainCollHeating() assumes a value for
369  * the average particle energy based on a Maxwell distribution going to infinity.
370  * If the maximum energy used is too low, the assumed average energy would be
371  * incorrect.
372  *
373  *********************************************************************************/
374 
375  lgNoTdustFailures = true;
376  for( nd=0; nd < gv.nBin; nd++ )
377  {
378  if( !gv.bin[nd]->lgTdustConverged )
379  {
380  lgNoTdustFailures = false;
381  break;
382  }
383  }
384 
385  /* CHECK 1: does the grain thermal emission conserve energy ? */
386  BolFlux = 0.;
387  for( i=0; i < rfield.nflux; i++ )
388  {
389  BolFlux += gv.GrainEmission[i]*rfield.anu[i]*EN1RYD;
390  }
391  Comparison1 = 0.;
392  for( nd=0; nd < gv.nBin; nd++ )
393  {
394  if( gv.bin[nd]->tedust < gv.bin[nd]->Tsublimat )
395  Comparison1 += CONSERV_TOL*gv.bin[nd]->GrainHeat;
396  else
397  /* for high temperatures the interpolation in dstems
398  * is less accurate, so we have to be more lenient */
399  Comparison1 += 10.*CONSERV_TOL*gv.bin[nd]->GrainHeat;
400  }
401 
402  /* >>chng 04 mar 11, add constant grain temperature to pass assert */
403  /* >>chng 04 jun 01, deleted test for constant grain temperature, PvH */
404  ASSERT( fabs(BolFlux-gv.GrainHeatSum) < Comparison1 );
405 
406  /* CHECK 2: assert charging balance */
407  for( nd=0; nd < gv.nBin; nd++ )
408  {
409  if( gv.bin[nd]->lgChrgConverged )
410  {
411  double ave = 0.5*(gv.bin[nd]->RateDn+gv.bin[nd]->RateUp);
412  /* >>chng 04 dec 16, add lgAbort - do not throw assert if we are in
413  * process of aborting */
414  ASSERT( lgAbort || fabs(gv.bin[nd]->RateDn-gv.bin[nd]->RateUp) < CONSERV_TOL*ave );
415  }
416  }
417 
418  if( lgNoTdustFailures && gv.lgDHetOn && gv.lgDColOn && thermal.ConstGrainTemp == 0. )
419  {
420  /* CHECK 3: calculate the total energy donated to grains, must be balanced by
421  * the energy emitted in thermal radiation plus various forms of gas heating */
422  Comparison1 = 0.;
423  for( nd=0; nd < gv.nBin; nd++ )
424  {
425  Comparison1 += gv.bin[nd]->BolFlux;
426  }
427  /* add in collisional heating of grains by plasma (if positive) */
428  Comparison1 += MAX2(gv.GasCoolColl,0.);
429  /* add in net amount of chemical energy donated by recombining ions and molecule formation */
430  Comparison1 += gv.GrainHeatChem;
431 
432  /* thermal emis PE effect gas heating by coll thermionic emis */
433  Comparison2 = gv.GrainHeatSum+thermal.heating[0][13]+thermal.heating[0][14]+thermal.heating[0][25];
434 
435  /* >>chng 06 jun 02, add test on gv.GrainHeatScaleFactor so that assert not thrown
436  * when set grain heat command is used */
438  fabs(Comparison1-Comparison2)/Comparison2 < CONSERV_TOL );
439  }
440 # endif
441  return;
442 }
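/* illustrative aside (not part of the original source): stripped of the Taylor expansions,
 * conversion factors and book-keeping, the kernel of the loop above is the sum below, which
 * folds the temperature distribution returned by qheat() into a modified blackbody spectrum;
 * anu (photon energy in Ryd) and abs_cs (absorption cross section) are hypothetical stand-ins
 * for the rfield and gv.bin quantities used above */
# if 0
STATIC double GrainDiffuseKernel(double anu, double abs_cs, long qnbin,
				 const double qtemp[], const double qprob[])
{
	double flux = 0.;
	for( long j=0; j < qnbin; j++ )
	{
		/* h*nu/(k*T) for this temperature bin */
		double f = TE1RYD/qtemp[j]*anu;
		if( f < log(0.999*DBL_MAX) )
			/* Planck factor, weighted by the probability of this bin */
			flux += qprob[j]*abs_cs*anu*anu/(exp(f) - 1.);
	}
	return flux;
}
# endif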
443 
444 
445 /****************************************************************************
446  *
447  * qheat: driver routine for non-equilibrium heating of grains
448  *
449  * This routine calls GetProbDistr_LowLimit, GetProbDistr_HighLimit
450  * (which do the actual non-equilibrium calculations), and does the
451  * subsequent quality control.
452  *
453  * Written by Peter van Hoof (UK, CITA, QUB).
454  *
455  ****************************************************************************/
456 
457 /* this is the new version of the quantum heating code, used starting Cloudy 96 beta 3 */
458 
459 void qheat(/*@out@*/ double qtemp[], /* qtemp[NQGRID] */
460  /*@out@*/ double qprob[], /* qprob[NQGRID] */
461  /*@out@*/ long int *qnbin,
462  long int nd)
463 {
464  bool lgBoundErr,
465  lgDelta,
466  lgNegRate,
467  lgOK,
468  lgTried;
469  long int i,
470  nWideFail;
471  QH_Code ErrorCode,
472  ErrorCode2,
473  ErrorStart;
474  double c0,
475  c1,
476  c2,
477  check,
478  DefFac,
479  deriv,
480  *dPdlnT/*[NQGRID]*/,
481  fwhm,
482  FwhmRatio,
483  integral,
484  minBracket,
485  maxBracket,
486  new_tmin,
487  NumEvents,
488  oldy,
489  *Phi/*[NC_ELL]*/,
490  *PhiDrv/*[NC_ELL]*/,
491  *phiTilde/*[NC_ELL]*/,
492  rel_tol,
493  Tmax,
494  tol,
495  Umax,
496  xx,
497  y;
498 
499 
500  DEBUG_ENTRY( "qheat()" );
501 
502  /* sanity checks */
503  ASSERT( gv.bin[nd]->lgQHeat );
504  ASSERT( nd >= 0 && nd < gv.nBin );
505 
506  if( trace.lgTrace && trace.lgDustBug )
507  {
508  fprintf( ioQQQ, "\n >>>>qheat called for grain %s\n", gv.bin[nd]->chDstLab );
509  }
510 
511  /* >>chng 01 aug 22, allocate space */
512  /* phiTilde is continuum corrected for photo-electric effect, in events/H/s/cell, default depl */
513  phiTilde = (double*)MALLOC((size_t)((unsigned)rfield.nupper*sizeof(double)) );
514  Phi = (double*)MALLOC((size_t)((unsigned)rfield.nupper*sizeof(double)) );
515  PhiDrv = (double*)MALLOC((size_t)((unsigned)rfield.nupper*sizeof(double)) );
516  dPdlnT = (double*)MALLOC((size_t)(NQGRID*sizeof(double)) );
517 
518  qheat_init( nd, phiTilde, &check );
519 
520  check += gv.bin[nd]->GrainHeatColl-gv.bin[nd]->GrainCoolTherm;
521 
522  xx = integral = 0.;
523  c0 = c1 = c2 = 0.;
524  lgNegRate = false;
525  oldy = 0.;
526  tol = 1.;
527 
528  /* >>chng 01 nov 29, rfield.nflux -> gv.qnflux, PvH */
529  /* >>chng 03 jan 26, gv.qnflux -> gv.bin[nd]->qnflux, PvH */
530  for( i=gv.bin[nd]->qnflux-1; i >= 0; i-- )
531  {
532  /* >>chng 97 jul 17, to summed continuum */
533  /* >>chng 00 mar 30, to phiTilde, to keep track of photo-electric effect and collisions, by PvH */
534  /* >>chng 01 oct 10, use trapezoidal rule for integrating Phi, reverse direction of integration.
535  * Phi[i] is now integral from exactly rfield.anu[i] to infinity to second-order precision, PvH */
536  /* >>chng 01 oct 30, change normalization of Phi, PhiDrv from <unit>/cm^3 -> <unit>/grain, PvH */
537  double fac = ( i < gv.bin[nd]->qnflux-1 ) ? 1./rfield.widflx[i] : 0.;
538  /* phiTilde has units events/H/s, y has units events/grain/s/Ryd */
539  y = phiTilde[i]*gv.bin[nd]->cnv_H_pGR*fac;
540  /* PhiDrv[i] = (Phi[i+1]-Phi[i])/(anu[i+1]-anu[i]), units events/grain/s/Ryd */
541  PhiDrv[i] = -0.5*(y + oldy);
542  /* there is a minus sign here because we are integrating from infinity downwards */
543  xx -= PhiDrv[i]*(rfield.anu[i+1]-rfield.anu[i]);
544  /* Phi has units events/grain/s */
545  Phi[i] = xx;
546 
547 # ifndef NDEBUG
548  /* trapezoidal rule is not needed for integral, this is also second-order correct */
549  integral += phiTilde[i]*gv.bin[nd]->cnv_H_pCM3*rfield.anu[i]*EN1RYD;
550 # endif
551 
552  /* c<n> has units Ryd^(n+1)/grain/s */
553  c0 += Phi[i]*rfield.widflx[i];
554  c1 += Phi[i]*rfield.anu[i]*rfield.widflx[i];
555  c2 += Phi[i]*POW2(rfield.anu[i])*rfield.widflx[i];
556 
557  lgNegRate = lgNegRate || ( phiTilde[i] < 0. );
558 
559  oldy = y;
560  }
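 /* illustrative aside (not part of the original source): since Phi(E) is the cumulative
  * photon rate integral_E^infinity y(E') dE', integration by parts gives
  *    c0 = Int Phi dE     = Int E   y dE      (the mean absorbed power per grain),
  *    c1 = Int Phi E dE   = Int E^2 y dE / 2,
  *    c2 = Int Phi E^2 dE = Int E^3 y dE / 3,
  * which is how these moments enter the fwhm and NumEvents estimates below */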
561 
562  /* sanity check */
563  ASSERT( fabs(check-integral)/check <= CONSERV_TOL );
564 
565 # if 0
566  {
567  char fnam[50];
568  FILE *file;
569 
570  sprintf(fnam,"Lambda_%2.2ld.asc",nd);
571  file = fopen(fnam,"w");
572  for( i=0; i < NDEMS; ++i )
573  fprintf(file,"%e %e %e\n",
574  exp(gv.dsttmp[i]),
575  ufunct(exp(gv.dsttmp[i]),nd,&lgBoundErr),
576  exp(gv.bin[nd]->dstems[i])*gv.bin[nd]->cnv_H_pGR/EN1RYD);
577  fclose(file);
578 
579  sprintf(fnam,"Phi_%2.2ld.asc",nd);
580  file = fopen(fnam,"w");
581  for( i=0; i < gv.bin[nd]->qnflux; ++i )
582  fprintf(file,"%e %e\n", rfield.anu[i],Phi[i]);
583  fclose(file);
584  }
585 # endif
586 
587  /* Tmax is where p(U) will peak (at least in high intensity limit) */
588  Tmax = gv.bin[nd]->tedust;
589  /* grain enthalpy at peak of p(U), in Ryd */
590  Umax = ufunct(Tmax,nd,&lgBoundErr);
591  ASSERT( !lgBoundErr ); /* this should never happen */
592  /* y is dln(Lambda)/dlnT at Tmax */
593  spldrv_safe(gv.dsttmp,gv.bin[nd]->dstems,gv.bin[nd]->dstslp2,NDEMS,log(Tmax),&y,&lgBoundErr);
594  ASSERT( !lgBoundErr ); /* this should never happen */
595  /* deriv is dLambda/dU at Umax, in 1/grain/s */
596  deriv = y*c0/(uderiv(Tmax,nd)*Tmax);
597  /* estimated FWHM of probability distribution, in Ryd */
598  fwhm = sqrt(8.*LN_TWO*c1/deriv);
599 
600  NumEvents = POW2(fwhm)*c0/(4.*LN_TWO*c2);
601  FwhmRatio = fwhm/Umax;
602 
603  /* >>chng 01 nov 15, change ( NumEvents > MAX_EVENTS2 ) --> ( FwhmRatio < FWHM_RATIO2 ), PvH */
604  lgDelta = ( FwhmRatio < FWHM_RATIO2 );
605  /* high intensity case is always OK since we will use equilibrium treatment */
606  lgOK = lgDelta;
607 
608  ErrorStart = QH_OK;
609 
610  if( lgDelta )
611  {
612  /* in this case we ignore negative rates, equilibrium treatment is good enough */
613  lgNegRate = false;
614  ErrorStart = MAX2(ErrorStart,QH_DELTA);
615  }
616 
617  if( lgNegRate )
618  ErrorStart = MAX2(ErrorStart,QH_NEGRATE_FAIL);
619 
620  ErrorCode = ErrorStart;
621 
622  if( trace.lgTrace && trace.lgDustBug )
623  {
624  double Rate2 = 0.;
625  for( int nz=0; nz < gv.bin[nd]->nChrg; nz++ )
626  Rate2 += gv.bin[nd]->chrg[nz]->FracPop*gv.bin[nd]->chrg[nz]->HeatingRate2;
627 
628  fprintf( ioQQQ, " grain heating: %.4e, integral %.4e, total rate %.4e lgNegRate %c\n",
629  gv.bin[nd]->GrainHeat,integral,Phi[0],TorF(lgNegRate));
630  fprintf( ioQQQ, " av grain temp %.4e av grain enthalpy (Ryd) %.4e\n",
631  gv.bin[nd]->tedust,Umax);
632  fprintf( ioQQQ, " fwhm^2/(4ln2*c2/c0): %.4e fwhm (Ryd) %.4e fwhm/Umax %.4e\n",
633  NumEvents,fwhm,FwhmRatio );
634  fprintf( ioQQQ, " HeatingRate1 %.4e HeatingRate2 %.4e lgQHTooWide %c\n",
635  gv.bin[nd]->HeatingRate1*gv.bin[nd]->cnv_H_pCM3, Rate2*gv.bin[nd]->cnv_H_pCM3,
636  TorF(gv.bin[nd]->lgQHTooWide) );
637  }
638 
639  /* these two variables will bracket qtmin, they should only be needed during the initial search phase */
640  minBracket = GRAIN_TMIN;
641  maxBracket = gv.bin[nd]->tedust;
642 
643  /* >>chng 02 jan 30, introduced lgTried to avoid running GetProbDistr_HighLimit over and over..., PvH */
644  lgTried = false;
645  /* >>chng 02 aug 06, introduced QH_WIDE_FAIL and nWideFail, PvH */
646  nWideFail = 0;
647  /* >>chng 03 jan 27, introduced DefFac to increase factor for repeated LOW_FAIL's, PvH */
648  DefFac = DEF_FAC;
649  /* >>chng 04 nov 10, introduced rel_tol to increase precision in case of repeated CONV_FAIL's, PvH */
650  rel_tol = 1.;
651 
652  /* if average number of multiple photon events is too high, lgOK is set to true */
653  /* >>chng 02 aug 12, added gv.bin[nd]->lgQHTooWide to prevent unnecessary looping here.
654  * In general the number of integration steps needed to integrate the probability distribution
655  * will increase monotonically with depth into the cloud. Hence, once the distribution becomes
656  * too wide to fit into NQGRID steps (which only happens for extremely cold grains in deeply
657  * shielded conditions) there is no hope of ever converging GetProbDistr_LowLimit and the code
658  * will waste a lot of CPU time establishing this for every zone again. So once the distribution
659  * becomes too wide we immediately skip to the analytic approximation to save time, PvH */
660  for( i=0; i < LOOP_MAX && !lgOK && !gv.bin[nd]->lgQHTooWide; i++ )
661  {
662  if( gv.bin[nd]->qtmin >= gv.bin[nd]->tedust )
663  {
664  /* >>chng 02 jul 31, was gv.bin[nd]->qtmin = 0.7*gv.bin[nd]->tedust */
665  /* >>chng 03 nov 10, changed Umax/exp(+... to Umax*exp(-... to avoid overflow, PvH */
666  double Ulo = Umax*exp(-sqrt(-log(PROB_CUTOFF_LO)/(4.*LN_TWO))*fwhm/Umax);
667  double MinEnth = exp(gv.bin[nd]->DustEnth[0]);
668  Ulo = MAX2(Ulo,MinEnth);
669  gv.bin[nd]->qtmin = inv_ufunct(Ulo,nd,&lgBoundErr);
670  ASSERT( !lgBoundErr ); /* this should never happen */
671  /* >>chng 02 jul 30, added this test; prevents problems with ASSERT below, PvH */
672  if( gv.bin[nd]->qtmin <= minBracket || gv.bin[nd]->qtmin >= maxBracket )
673  gv.bin[nd]->qtmin = sqrt(minBracket*maxBracket);
674  }
675  gv.bin[nd]->qtmin = MAX2(gv.bin[nd]->qtmin,GRAIN_TMIN);
676 
677  ASSERT( minBracket <= gv.bin[nd]->qtmin && gv.bin[nd]->qtmin <= maxBracket );
678 
679  ErrorCode = ErrorStart;
680 
681  /* >>chng 01 nov 15, add ( FwhmRatio >= FWHM_RATIO ), PvH */
682  if( FwhmRatio >= FWHM_RATIO && NumEvents <= MAX_EVENTS )
683  {
684  GetProbDistr_LowLimit(nd,rel_tol,Umax,fwhm,Phi,PhiDrv,qtemp,qprob,dPdlnT,qnbin,
685  &new_tmin,&nWideFail,&ErrorCode);
686 
687  /* >>chng 02 jan 07, various changes to improve convergence for very cold grains, PvH */
688  if( ErrorCode == QH_DELTA_FAIL && fwhm < Umax && !lgTried )
689  {
690  double dummy;
691 
692  /* this situation can mean two things: either the photon rate is so high that
693  * the code needs too many steps to integrate the probability distribution,
694  * or alternatively, tmin is far too low and the code needs too many steps
695  * to overcome the rising side of the probability distribution.
696  * So we call GetProbDistr_HighLimit first to determine if the former is the
697  * case; if that fails then the latter must be true and we reset QH_DELTA_FAIL */
698  ErrorCode = MAX2(ErrorStart,QH_ANALYTIC);
699  /* use dummy to avoid losing estimate for new_tmin from GetProbDistr_LowLimit */
700  /* >>chng 02 aug 06, introduced STRICT and RELAX, PvH */
701  GetProbDistr_HighLimit(nd,STRICT,Umax,fwhm,qtemp,qprob,dPdlnT,&tol,qnbin,&dummy,
702  &ErrorCode);
703 
704  if( ErrorCode >= QH_RETRY )
705  {
706  ErrorCode = QH_DELTA_FAIL;
707  lgTried = true;
708  }
709  }
710 
711  /* >>chng 02 aug 07 major rewrite of the logic below */
712  if( ErrorCode < QH_NO_REBIN )
713  {
714  if( new_tmin < minBracket || new_tmin > maxBracket )
715  ++nWideFail;
716 
717  if( nWideFail < WIDE_FAIL_MAX )
718  {
719  if( new_tmin <= minBracket )
720  new_tmin = sqrt(gv.bin[nd]->qtmin*minBracket);
721  if( new_tmin >= maxBracket )
722  new_tmin = sqrt(gv.bin[nd]->qtmin*maxBracket);
723  }
724  else
725  {
726  ErrorCode = MAX2(ErrorCode,QH_WIDE_FAIL);
727  }
728 
729  if( ErrorCode == QH_CONV_FAIL )
730  {
731  rel_tol *= 0.9;
732  }
733  }
734  else if( ErrorCode == QH_LOW_FAIL )
735  {
736  double help1 = gv.bin[nd]->qtmin*sqrt(DefFac);
737  double help2 = sqrt(gv.bin[nd]->qtmin*maxBracket);
738  minBracket = gv.bin[nd]->qtmin;
739  new_tmin = MIN2(help1,help2);
740  /* increase factor in case we get repeated LOW_FAIL's */
741  DefFac += 1.5;
742  }
743  else if( ErrorCode == QH_HIGH_FAIL )
744  {
745  double help = sqrt(gv.bin[nd]->qtmin*minBracket);
746  maxBracket = gv.bin[nd]->qtmin;
747  new_tmin = MAX2(gv.bin[nd]->qtmin/DEF_FAC,help);
748  }
749  else
750  {
751  new_tmin = sqrt(minBracket*maxBracket);
752  }
753  }
754  else
755  {
756  GetProbDistr_HighLimit(nd,STRICT,Umax,fwhm,qtemp,qprob,dPdlnT,&tol,qnbin,&new_tmin,
757  &ErrorCode);
758  }
759 
760  gv.bin[nd]->qtmin = new_tmin;
761 
762  lgOK = ( ErrorCode < QH_RETRY );
763 
764  if( ErrorCode >= QH_FATAL )
765  break;
766 
767  if( ErrorCode != QH_LOW_FAIL )
768  DefFac = DEF_FAC;
769 
770  if( trace.lgTrace && trace.lgDustBug )
771  {
772  fprintf( ioQQQ, " GetProbDistr returns code %d\n", ErrorCode );
773  if( !lgOK )
774  {
775  fprintf( ioQQQ, " >>>>qheat trying another iteration, qtmin bracket %.4e %.4e",
776  minBracket,maxBracket );
777  fprintf( ioQQQ, " nWideFail %ld\n", nWideFail );
778  }
779  }
780  }
781 
782  if( ErrorCode == QH_WIDE_FAIL )
783  gv.bin[nd]->lgQHTooWide = true;
784 
785  /* >>chng 03 jan 17, added test for !lgDelta, PvH */
786  /* if( gv.bin[nd]->lgQHTooWide ) */
787  if( gv.bin[nd]->lgQHTooWide && !lgDelta )
788  ErrorCode = MAX2(ErrorCode,QH_WIDE_FAIL);
789 
790 /* if( ErrorCode >= QH_RETRY ) */
791 /* printf( " *** PROBLEM loop not converged, errorcode %d\n",ErrorCode ); */
792 
793  /* The quantum heating code tends to run into trouble when it goes deep into the neutral zone,
794  * especially if the original spectrum was very hard, as is the case in high excitation PNe or AGN.
795  * You then get a bipartition in the spectrum where most of the photons have low energy, while
796  * there still are hard X-ray photons left. The fact that the average energy per photon is low
797  * forces the quantum code to take tiny little steps when integrating the probability distribution,
798  * while the fact that X-ray photons are still present implies that big temperature spikes still
799  * occur and hence the temperature distribution is very wide. Therefore the code needs a zillion
800  * steps to integrate the probability distribution and simply runs out of room. As a last resort
801  * try the analytic approximation with relaxed constraints used below. */
802  /* >>chng 02 oct 03, vary Umax and fwhm to force fit with fwhm/Umax remaining constant */
803  /* >>chng 03 jan 17, changed test so that last resort always gets tried when lgOK = lgDelta = false, PvH */
804  /* if( !lgOK && FwhmRatio >= FWHM_RATIO && NumEvents <= MAX_EVENTS ) */
805  if( !lgOK && !lgDelta )
806  {
807  double Umax2 = Umax*sqrt(tol);
808  double fwhm2 = fwhm*sqrt(tol);
809 
810  for( i=0; i < LOOP_MAX; ++i )
811  {
812  double dummy;
813 
814  ErrorCode2 = MAX2(ErrorStart,QH_ANALYTIC);
815  GetProbDistr_HighLimit(nd,RELAX,Umax2,fwhm2,qtemp,qprob,dPdlnT,&tol,qnbin,&dummy,
816  &ErrorCode2);
817 
818  lgOK = ( ErrorCode2 < QH_RETRY );
819  if( lgOK )
820  {
821  gv.bin[nd]->qtmin = dummy;
822  ErrorCode = ErrorCode2;
823  break;
824  }
825  else
826  {
827  Umax2 *= sqrt(tol);
828  fwhm2 *= sqrt(tol);
829  }
830  }
831  }
832 
833  if( nzone == 1 )
834  gv.bin[nd]->qtmin_zone1 = gv.bin[nd]->qtmin;
835 
836  gv.bin[nd]->lgUseQHeat = ( lgOK && !lgDelta );
837  gv.bin[nd]->lgEverQHeat = ( gv.bin[nd]->lgEverQHeat || gv.bin[nd]->lgUseQHeat );
838 
839  if( lgOK )
840  {
841  if( trace.lgTrace && trace.lgDustBug )
842  fprintf( ioQQQ, " >>>>qheat converged with code: %d\n", ErrorCode );
843  }
844  else
845  {
846  *qnbin = 0;
847  ++gv.bin[nd]->QHeatFailures;
848  fprintf( ioQQQ, " PROBLEM qheat did not converge grain %s in zone %ld, error code = %d\n",
849  gv.bin[nd]->chDstLab,nzone,ErrorCode );
850  }
851 
852  if( gv.QHPunchFile != NULL && ( iterations.lgLastIt || !gv.lgQHPunLast ) )
853  {
854  fprintf( gv.QHPunchFile, "\nDust Temperature Distribution: grain %s zone %ld\n",
855  gv.bin[nd]->chDstLab,nzone );
856 
857  fprintf( gv.QHPunchFile, "Equilibrium temperature: %.2f\n", gv.bin[nd]->tedust );
858 
859  if( gv.bin[nd]->lgUseQHeat )
860  {
861  /* >>chng 01 oct 09, remove qprob from output, it depends on step size, PvH */
862  fprintf( gv.QHPunchFile, "Number of bins: %ld\n", *qnbin );
863  fprintf( gv.QHPunchFile, " Tgrain dP/dlnT\n" );
864  for( i=0; i < *qnbin; i++ )
865  {
866  fprintf( gv.QHPunchFile, "%.4e %.4e\n", qtemp[i],dPdlnT[i] );
867  }
868  }
869  else
870  {
871  fprintf( gv.QHPunchFile, "**** quantum heating was not used\n" );
872  }
873  }
874 
875  free ( phiTilde );
876  free ( Phi );
877  free ( PhiDrv );
878  free ( dPdlnT );
879  return;
880 }
881 
882 
883 /* initialize phiTilde */
884 STATIC void qheat_init(long nd,
885  /*@out@*/ double phiTilde[], /* phiTilde[rfield.nupper] */
886  /*@out@*/ double *check)
887 {
888  long i,
889  nz;
890  double sum = 0.;
891 
892  /*@-redef@*/
893  enum {DEBUG_LOC=false};
894  /*@+redef@*/
895 
896  DEBUG_ENTRY( "qheat_init()" );
897 
898  /* sanity checks */
899  ASSERT( gv.bin[nd]->lgQHeat );
900  ASSERT( nd >= 0 && nd < gv.nBin );
901 
902  *check = 0.;
903 
904  /* >>chng 01 nov 29, rfield.nflux -> gv.qnflux, PvH */
905  /* >>chng 03 jan 26, gv.qnflux -> gv.bin[nd]->qnflux, PvH */
906  for( i=0; i < gv.bin[nd]->qnflux; i++ )
907  {
908  phiTilde[i] = 0.;
909  }
910 
911  /* fill in auxiliary array for quantum heating routine
912  * it reshuffles the input spectrum times the cross section to take
913  * the photo-electric effect into account. this prevents the quantum
914  * heating routine from having to calculate this effect over and over
915  * again; it can do a straight integration instead, making the code
916  * a lot simpler and faster. this initializes the array for non-ionizing
917  * energies, the reshuffling for higher energies is done in the next loop
918  * phiTilde has units events/H/s/cell at default depletion */
919 
920  double NegHeatingRate = 0.;
921 
922  for( nz=0; nz < gv.bin[nd]->nChrg; nz++ )
923  {
924  double check1 = 0.;
925  ChargeBin *gptr = gv.bin[nd]->chrg[nz];
926 
927  /* integrate over incident continuum for non-ionizing energies */
928  for( i=0; i < MIN2(gptr->ipThresInf,rfield.nflux); i++ )
929  {
930  check1 += rfield.SummedCon[i]*gv.bin[nd]->dstab1[i]*rfield.anu[i];
931  phiTilde[i] += gptr->FracPop*rfield.SummedCon[i]*gv.bin[nd]->dstab1[i];
932  }
933 
934  /* >>chng 01 mar 02, use new expressions for grain cooling and absorption
935  * cross sections following the discussion with Joe Weingartner, PvH */
936  for( i=gptr->ipThresInf; i < rfield.nflux; i++ )
937  {
938  long ipLo2 = gptr->ipThresInfVal;
939  double cs1 = ( i >= ipLo2 ) ? gv.bin[nd]->dstab1[i]*gptr->yhat_primary[i] : 0.;
940 
941  check1 += rfield.SummedCon[i]*gptr->fac1[i];
942  /* this accounts for the photons that are fully absorbed by grain */
943  phiTilde[i] += gptr->FracPop*rfield.SummedCon[i]*MAX2(gv.bin[nd]->dstab1[i]-cs1,0.);
944 
945  /* >>chng 01 oct 10, use bisection search to find ip. On C scale now */
946 
947  /* this accounts for photons that eject an electron from the valence band */
948  if( cs1 > 0. )
949  {
950  /* we treat photo-ejection and all subsequent de-excitation cascades
951  * from the conduction/valence band as one simultaneous event */
952  /* the condition cs1 > 0. assures i >= ipLo2 */
953  /* ratio is number of ejected electrons per primary ionization */
954  double ratio = ( gv.lgWD01 ) ? 1. : gptr->yhat[i]/gptr->yhat_primary[i];
955  /* ehat is average energy of ejected electron at infinity */
956  double ehat = gptr->ehat[i];
957  double cool1, sign = 1.;
958  realnum xx;
959 
960  if( gptr->DustZ <= -1 )
961  cool1 = gptr->ThresSurf + gptr->PotSurf + ehat;
962  else
963  cool1 = gptr->ThresSurfVal + gptr->PotSurf + ehat;
964  /* secondary electrons are assumed to have the same Elo and Ehi as the
965  * primary electrons that excite them. This neglects the threshold for
966  * the ejection of the secondary electron and can cause xx to become
967  * negative if Ehi is less than that threshold. To conserve energy we
968  * will simply assume a negative rate here. Since secondary electrons
969  * are generally not important this should have little impact on the
970  * overall temperature distribution */
971  xx = rfield.anu[i] - (realnum)(ratio*cool1);
972  if( xx < 0.f )
973  {
974  xx = -xx;
975  sign = -1.;
976  }
977  long ipLo = hunt_bisect( gv.anumin, i+1, xx );
978  phiTilde[ipLo] += sign*gptr->FracPop*rfield.SummedCon[i]*cs1;
979  }
980 
981  /* no need to account for photons that eject an electron from the conduction band */
982  /* >>chng 01 dec 11, cool2 always equal to rfield.anu[i] -> no grain heating */
983  }
984 
985  *check += gptr->FracPop*check1*EN1RYD*gv.bin[nd]->cnv_H_pCM3;
986 
987  sum += gptr->FracPop*check1*EN1RYD*gv.bin[nd]->cnv_H_pCM3;
988 
989  if( DEBUG_LOC )
990  {
991  double integral = 0.;
992  for( i=0; i < gv.bin[nd]->qnflux; i++ )
993  {
994  integral += phiTilde[i]*gv.bin[nd]->cnv_H_pCM3*rfield.anu[i]*EN1RYD;
995  }
996  dprintf( ioQQQ, " integral test 1: integral %.6e %.6e\n", integral, sum );
997  }
998 
999  /* add quantum heating due to recombination of electrons, subtract thermionic cooling */
1000 
1001  /* gptr->HeatingRate2 is net heating rate in erg/H/s at standard depl
1002  * includes contributions from recombining electrons and autoionizing electrons;
1003  * thermionic emission is subtracted here since it is the inverse process
1004  *
1005  * NB - in extreme conditions this rate may be negative (if there
1006  * is an intense radiation field leading to very hot grains, but no ionizing
1007  * photons, hence very few free electrons). we assume that the photon rates
1008  * are high enough under those circumstances to avoid phiTilde becoming negative,
1009  * but we will check that in qheat1 anyway. */
1010 
1011  /* >>chng 03 nov 06, check for extremely low HeatingRate and save CPU time, pah_crash.in, PvH */
1012  if( gptr->HeatingRate2*gv.bin[nd]->cnv_H_pCM3 > 0.05*CONSERV_TOL*gv.bin[nd]->GrainHeat )
1013  {
1014  double *RateArr,Sum,ESum,DSum,Delta,E_av2,Corr;
1015  double fac = BOLTZMANN/EN1RYD*phycon.te;
1016  /* E0 is barrier that electron needs to overcome, zero for positive grains */
1017  /* >>chng 03 jan 23, added second term to correctly account for autoionizing states
1018  * where ThresInfInc is negative, tested in small_grain.in, PvH */
1019  double E0 = -(MIN2(gptr->PotSurfInc,0.) + MIN2(gptr->ThresInfInc,0.));
1020  /* >>chng 01 mar 02, this should be energy gap between top electron and infinity, PvH */
1021  /* >>chng 01 nov 21, use correct barrier: ThresInf[nz] -> ThresInfInc[nz], PvH */
1022  /* >>chng 03 jan 23, replaced -E0 with MIN2(PotSurfInc[nz],0.), PvH */
1023  double Einf = gptr->ThresInfInc + MIN2(gptr->PotSurfInc,0.);
1024  /* this is average energy deposited by one event, in erg
1025  * this value is derived from distribution assumed here, and may
1026  * not be the same as HeatElectrons/(CollisionRateElectr*eta) !! */
1027  /* >>chng 01 nov 21, use correct barrier: ThresInf[nz] -> ThresInfInc[nz], PvH */
1028  /* >>chng 03 jan 23, replaced ThresInfInc[nz] with MAX2(ThresInfInc[nz],0.), PvH */
1029  double E_av = MAX2(gptr->ThresInfInc,0.)*EN1RYD + 2.*BOLTZMANN*phycon.te;
1030  /* this is rate in events/H/s at standard depletion */
1031  double rate = gptr->HeatingRate2/E_av;
1032 
1033  double ylo = -exp(-E0/fac);
1034  /* this is highest kinetic energy of electron that can be represented in phiTilde */
1035  /* >>chng 01 nov 29, rfield.nflux -> gv.qnflux, PvH */
1036  /* >>chng 03 jan 26, gv.qnflux -> gv.bin[nd]->qnflux, PvH */
1037  double Ehi = gv.anumax[gv.bin[nd]->qnflux-1]-Einf;
1038  double yhi = ((E0-Ehi)/fac-1.)*exp(-Ehi/fac);
1039  /* renormalize rate so that integral over phiTilde*anu gives correct total energy */
1040  rate /= yhi-ylo;
1041 
1042  /* correct for fractional population of this charge state */
1043  rate *= gptr->FracPop;
1044 
1045  /* >>chng 03 jan 24, add code to correct for discretization errors, hotdust.in, PvH */
1046  RateArr=(double*)MALLOC((size_t)((unsigned)gv.bin[nd]->qnflux*sizeof(double)));
1047  Sum = ESum = DSum = 0.;
1048 
1049  /* >>chng 04 jan 21, replaced gv.bin[nd]->qnflux -> gv.bin[nd]->qnflux2, PvH */
1050  for( i=0; i < gv.bin[nd]->qnflux2; i++ )
1051  {
1052  Ehi = gv.anumax[i] - Einf;
1053  if( Ehi >= E0 )
1054  {
1055  /* Ehi is kinetic energy of electron at infinity */
1056  yhi = ((E0-Ehi)/fac-1.)*exp(-Ehi/fac);
1057  /* >>chng 01 mar 24, use MAX2 to protect against roundoff error, PvH */
1058  RateArr[i] = rate*MAX2(yhi-ylo,0.);
1059  Sum += RateArr[i];
1060  ESum += rfield.anu[i]*RateArr[i];
1061 # ifndef NDEBUG
1062  DSum += rfield.widflx[i]*RateArr[i];
1063 # endif
1064  ylo = yhi;
1065  }
1066  else
1067  {
1068  RateArr[i] = 0.;
1069  }
1070  }
1071  E_av2 = ESum/Sum*EN1RYD;
1072  Delta = DSum/Sum*EN1RYD;
1073  ASSERT( fabs(E_av-E_av2) <= Delta );
1074  Corr = E_av/E_av2;
1075 
1076  for( i=0; i < gv.bin[nd]->qnflux2; i++ )
1077  {
1078  phiTilde[i] += RateArr[i]*Corr;
1079  }
1080 
1081  sum += gptr->FracPop*gptr->HeatingRate2*gv.bin[nd]->cnv_H_pCM3;
1082 
1083  if( DEBUG_LOC )
1084  {
1085  double integral = 0.;
1086  for( i=0; i < gv.bin[nd]->qnflux; i++ )
1087  {
1088  integral += phiTilde[i]*gv.bin[nd]->cnv_H_pCM3*rfield.anu[i]*EN1RYD;
1089  }
1090  dprintf( ioQQQ, " integral test 2: integral %.6e %.6e\n", integral, sum );
1091  }
1092 
1093  free( RateArr );
1094  }
1095  else
1096  {
1097  NegHeatingRate += gptr->FracPop*gptr->HeatingRate2*gv.bin[nd]->cnv_H_pCM3;
1098  }
1099  }
1100 
1101  /* ============================================================================= */
1102 
1103  /* add quantum heating due to molecule/ion collisions */
1104 
1105  /* gv.bin[nd]->HeatingRate1 is heating rate in erg/H/s at standard depl
1106  * includes contributions from molecules/neutral atoms and recombining ions
1107  *
1108  * in fully ionized conditions electron heating rates will be much higher
1109  * than ion and molecule rates since electrons are so much faster and grains
1110  * tend to be positive. in non-ionized conditions the main contribution will
1111  * come from neutral atoms and molecules, so it is appropriate to treat both
1112  * the same. in fully ionized conditions we don't care since unimportant.
1113  *
1114  * NB - if grains are hotter than ambient gas, the heating rate may become negative.
1115  * if photon rates are not high enough to prevent phiTilde from becoming negative,
1116  * we will raise a flag while calculating the quantum heating in qheat1 */
1117 
1118  /* >>chng 03 nov 06, check for extremely low HeatingRate and save CPU time, PvH */
1119  if( gv.bin[nd]->HeatingRate1*gv.bin[nd]->cnv_H_pCM3 > 0.05*CONSERV_TOL*gv.bin[nd]->GrainHeat )
1120  {
1121  /* limits for Taylor expansion of (1+x)*exp(-x) */
1122  /* these choices will assure only 6 digits precision */
1123  const double LIM2 = pow(3.e-6,1./3.);
1124  const double LIM3 = pow(8.e-6,1./4.);
1125  /* if gas temperature is higher than grain temperature we will
1126  * consider Maxwell-Boltzmann distribution of incoming particles
1127  * and ignore distribution of outgoing particles, if grains
1128  * are hotter than ambient gas, we use reverse treatment */
1129  double fac = BOLTZMANN/EN1RYD*MAX2(phycon.te,gv.bin[nd]->tedust);
1130  /* this is average energy deposited/extracted by one event, in erg */
1131  double E_av = 2.*BOLTZMANN*MAX2(phycon.te,gv.bin[nd]->tedust);
1132  /* this is rate in events/H/s at standard depletion */
1133  double rate = gv.bin[nd]->HeatingRate1/E_av;
1134 
1135  double ylo = -1.;
1136  /* this is highest energy of incoming/outgoing particle that can be represented in phiTilde */
1137  /* >>chng 01 nov 29, rfield.nflux -> gv.qnflux, PvH */
1138  /* >>chng 03 jan 26, gv.qnflux -> gv.bin[nd]->qnflux, PvH */
1139  double Ehi = gv.anumax[gv.bin[nd]->qnflux-1];
1140  double yhi = -(Ehi/fac+1.)*exp(-Ehi/fac);
1141  /* renormalize rate so that integral over phiTilde*anu gives correct total energy */
1142  rate /= yhi-ylo;
1143 
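 /* illustrative aside (not part of the original source): y(E) = -(E/fac+1)*exp(-E/fac) is an
  * antiderivative of E*exp(-E/fac)/fac^2, so yhi-ylo across one cell is proportional to the
  * energy-weighted Maxwell-Boltzmann probability of that cell; the mean of that weighted
  * distribution is 2*fac, consistent with the choice E_av = 2*k*T made above */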
1144  for( i=0; i < gv.bin[nd]->qnflux2; i++ )
1145  {
1146  /* Ehi is kinetic energy of incoming/outgoing particle
1147  * we assume that Ehi-E0 is deposited/extracted from grain */
1148  /* Ehi = gv.anumax[i]; */
1149  double x = gv.anumax[i]/fac;
1150  /* (1+x)*exp(-x) = 1 - 1/2*x^2 + 1/3*x^3 - 1/8*x^4 + O(x^5)
1151  * = 1 - Sum_n=2^infty (-x)^n/(n*(n-2)!) */
1152  if( x > LIM3 )
1153  yhi = -(x+1.)*exp(-x);
1154  else if( x > LIM2 )
1155  yhi = -(((1./3.)*x - 0.5)*x*x + 1.);
1156  else
1157  yhi = -(1. - 0.5*x*x);
1158 
1159  /* >>chng 01 mar 24, use MAX2 to protect against roundoff error, PvH */
1160  phiTilde[i] += rate*MAX2(yhi-ylo,0.);
1161  ylo = yhi;
1162  }
1163 
1164  sum += gv.bin[nd]->HeatingRate1*gv.bin[nd]->cnv_H_pCM3;
1165 
1166  if( DEBUG_LOC )
1167  {
1168  double integral = 0.;
1169  for( i=0; i < gv.bin[nd]->qnflux; i++ )
1170  {
1171  integral += phiTilde[i]*gv.bin[nd]->cnv_H_pCM3*rfield.anu[i]*EN1RYD;
1172  }
1173  dprintf( ioQQQ, " integral test 3: integral %.6e %.6e\n", integral, sum );
1174  }
1175  }
1176  else
1177  {
1178  NegHeatingRate += gv.bin[nd]->HeatingRate1*gv.bin[nd]->cnv_H_pCM3;
1179  }
1180 
1181  /* here we account for the negative heating rates, we simply do that by scaling the entire
1182  * phiTilde array down by a constant factor such that the total amount of energy is conserved
1183  * This treatment assures that phiTilde never goes negative, which avoids problems further on */
1184  if( NegHeatingRate < 0. )
1185  {
1186  double scale_fac = (sum+NegHeatingRate)/sum;
1187  for( i=0; i < gv.bin[nd]->qnflux; i++ )
1188  phiTilde[i] *= scale_fac;
1189 
1190  sum += NegHeatingRate;
1191 
1192  if( DEBUG_LOC )
1193  {
1194  double integral = 0.;
1195  for( i=0; i < gv.bin[nd]->qnflux; i++ )
1196  {
1197  integral += phiTilde[i]*gv.bin[nd]->cnv_H_pCM3*rfield.anu[i]*EN1RYD;
1198  }
1199  dprintf( ioQQQ, " integral test 4: integral %.6e %.6e\n", integral, sum );
1200  }
1201  }
1202 
1203  return;
1204 }
1205 
1206 
1207 /*******************************************************************************************
1208  *
1209  * GetProbDistr_LowLimit: main routine for calculating non-equilibrium heating of grains
1210  *
1211  * This routine implements the algorithm outlined in:
1212  * >>refer grain physics Guhathakurta & Draine, 1989, ApJ, 345, 230
1213  *
1214  * The original (fortran) version of the code was written by Kevin Volk.
1215  *
1216  * Heavily modified and adapted for new style grains by Peter van Hoof.
1217  *
1218  *******************************************************************************************/
1219 
1220 STATIC void GetProbDistr_LowLimit(long nd,
1221  double rel_tol,
1222  double Umax,
1223  double fwhm,
1224  /*@in@*/ double Phi[], /* Phi[NQGRID] */
1225  /*@in@*/ double PhiDrv[], /* PhiDrv[NQGRID] */
1226  /*@out@*/ double qtemp[], /* qtemp[NQGRID] */
1227  /*@out@*/ double qprob[], /* qprob[NQGRID] */
1228  /*@out@*/ double dPdlnT[], /* dPdlnT[NQGRID] */
1229  /*@out@*/ long int *qnbin,
1230  /*@out@*/ double *new_tmin,
1231  long *nWideFail,
1232  /*@out@*/ QH_Code *ErrorCode)
1233 {
1234  bool lgAllNegSlope,
1235  lgBoundErr;
1236  long int j,
1237  k,
1238  l,
1239  nbad,
1240  nbin,
1241  nend=0,
1242  nmax,
1243  nok,
1244  nstart=0,
1245  nstart2=0;
1246  double dCool,
1247  delu[NQGRID],
1248  dlnLdlnT,
1249  dlnpdlnU,
1250  fac = 0.,
1251  Lambda[NQGRID],
1252  maxVal,
1253  NextStep,
1254  p[NQGRID],
1255  qtmin1,
1256  RadCooling,
1257  sum,
1258  u1[NQGRID],
1259  y;
1260 
1261 
1262  DEBUG_ENTRY( "GetProbDistr_LowLimit()" );
1263 
1264  /* sanity checks */
1265  ASSERT( nd >= 0 && nd < gv.nBin );
1266 
1267  if( trace.lgTrace && trace.lgDustBug )
1268  {
1269  fprintf( ioQQQ, " GetProbDistr_LowLimit called for grain %s\n", gv.bin[nd]->chDstLab );
1270  fprintf( ioQQQ, " got qtmin1 %.4e\n", gv.bin[nd]->qtmin);
1271  }
1272 
1273  qtmin1 = gv.bin[nd]->qtmin;
1274  qtemp[0] = qtmin1;
1275  /* u1 holds enthalpy in Ryd/grain */
1276  u1[0] = ufunct(qtemp[0],nd,&lgBoundErr);
1277  ASSERT( !lgBoundErr ); /* this should never happen */
1278  /* >>chng 00 mar 22, taken out factor 4, factored in hden and dstAbund
1279  * interpolate in dstems array instead of integrating explicitly, by PvH */
1280  splint_safe(gv.dsttmp,gv.bin[nd]->dstems,gv.bin[nd]->dstslp2,NDEMS,log(qtemp[0]),&y,&lgBoundErr);
1281  ASSERT( !lgBoundErr ); /* this should never happen */
1282  /* Lambda holds the radiated energy for grains in this bin, in Ryd/s/grain */
1283  Lambda[0] = exp(y)*gv.bin[nd]->cnv_H_pGR/EN1RYD;
1284  /* set up first step of integration */
1285  /* >>chng 01 nov 14, set to 2.*Lambda[0]/Phi[0] instead of u1[0],
1286  * this choice assures that p[1] doesn't make a large jump from p[0], PvH */
1287  delu[0] = 2.*Lambda[0]/Phi[0];
1288  p[0] = PROB_CUTOFF_LO;
1289  dPdlnT[0] = p[0]*qtemp[0]*uderiv(qtemp[0],nd);
1290  RadCooling = 0.5*p[0]*Lambda[0]*delu[0];
1291  NextStep = 0.01*Lambda[0]/Phi[0];
1292  /* >>chng 03 nov 10, added extra safeguard against stepsize too small, PvH */
1293  if( NextStep < 0.05*DBL_EPSILON*u1[0] )
1294  {
1295  *ErrorCode = MAX2(*ErrorCode,QH_STEP_FAIL);
1296  return;
1297  }
1298  else if( NextStep < 5.*DBL_EPSILON*u1[0] )
1299  {
1300  /* try a last resort... */
1301  NextStep *= 100.;
1302  }
1303 
1304  nbad = 0;
1305  k = 0;
1306 
1307  *qnbin = 0;
1308  *new_tmin = qtmin1;
1309  lgAllNegSlope = true;
1310  maxVal = dPdlnT[0];
1311  nmax = 0;
1312 
1313  /* this test neglects a negative contribution which is impossible to calculate, so it may
1314  * fail to detect cases where the probability distribution starts dropping immediately,
1315  * we will use a second test using the variable lgAllNegSlope below to catch those cases, PvH */
1316  spldrv_safe(gv.dsttmp,gv.bin[nd]->dstems,gv.bin[nd]->dstslp2,NDEMS,log(qtemp[0]),&dlnLdlnT,&lgBoundErr);
1317  ASSERT( !lgBoundErr ); /* this should never happen */
1318  dlnpdlnU = u1[0]*Phi[0]/Lambda[0] - dlnLdlnT*u1[0]/(qtemp[0]*uderiv(qtemp[0],nd));
1319  if( dlnpdlnU < 0. )
1320  {
1321  /* >>chng 03 nov 06, confirm this by integrating first step..., pah_crash.in, PvH */
1322  (void)TryDoubleStep(u1,delu,p,qtemp,Lambda,Phi,PhiDrv,NextStep,&dCool,k,nd,&lgBoundErr);
1323  dPdlnT[2] = p[2]*qtemp[2]*uderiv(qtemp[2],nd);
1324 
1325  if( dPdlnT[2] < dPdlnT[0] ) {
1326  /* if dPdlnT starts falling immediately,
1327  * qtmin1 was too high and convergence is impossible */
1328  *ErrorCode = MAX2(*ErrorCode,QH_HIGH_FAIL);
1329  return;
1330  }
1331  }
1332 
1333  /* NB NB -- every break in this loop should set *ErrorCode (except for regular stop criterion) !! */
1334  for( l=0; l < MAX_LOOP; ++l )
1335  {
1336  double RelError = TryDoubleStep(u1,delu,p,qtemp,Lambda,Phi,PhiDrv,NextStep,&dCool,k,nd,&lgBoundErr);
1337 
1338  /* this happens if the grain temperature in qtemp becomes higher than GRAIN_TMAX
1339  * nothing that TryDoubleStep returns can be trusted, so this check should be first */
1340  if( lgBoundErr )
1341  {
1342  nbad += 2;
1343  *ErrorCode = MAX2(*ErrorCode,QH_THIGH_FAIL);
1344  break;
1345  }
1346 
1347  /* estimate new stepsize */
1348  if( RelError > rel_tol*QHEAT_TOL )
1349  {
1350  nbad += 2;
1351 
1352  /* step is rejected, decrease stepsize and try again */
1353  NextStep *= sqrt(0.9*rel_tol*QHEAT_TOL/RelError);
1354 
1355  /* stepsize too small, this can happen at extreme low temperatures */
1356  if( NextStep < 5.*DBL_EPSILON*u1[k] )
1357  {
1358  *ErrorCode = MAX2(*ErrorCode,QH_STEP_FAIL);
1359  break;
1360  }
1361 
1362  continue;
1363  }
1364  else
1365  {
1366  /* step was OK, adjust stepsize */
1367  k += 2;
1368 
1369  /* >>chng 03 nov 10, safeguard against division by zero, PvH */
1370  NextStep *= MIN2(pow(0.9*rel_tol*QHEAT_TOL/MAX2(RelError,1.e-50),1./3.),4.);
1371  NextStep = MIN2(NextStep,Lambda[k]/Phi[0]);
1372  }
1373 
1374  dPdlnT[k-1] = p[k-1]*qtemp[k-1]*uderiv(qtemp[k-1],nd);
1375  dPdlnT[k] = p[k]*qtemp[k]*uderiv(qtemp[k],nd);
1376 
1377  lgAllNegSlope = lgAllNegSlope && ( dPdlnT[k] < dPdlnT[k-2] );
1378 
1379  if( dPdlnT[k-1] > maxVal )
1380  {
1381  maxVal = dPdlnT[k-1];
1382  nmax = k-1;
1383  }
1384  if( dPdlnT[k] > maxVal )
1385  {
1386  maxVal = dPdlnT[k];
1387  nmax = k;
1388  }
1389 
1390  RadCooling += dCool;
1391 
1392 // if( nzone >= 24 && nd == 0 ) {
1393 // printf(" k %ld T[k] %.6e U[k] %.6e p[k] %.6e dPdlnT[k] %.6e\n",k-1,qtemp[k-1],u1[k-1],p[k-1],dPdlnT[k-1]);
1394 // printf(" k %ld T[k] %.6e U[k] %.6e p[k] %.6e dPdlnT[k] %.6e\n",k,qtemp[k],u1[k],p[k],dPdlnT[k]);
1395 // }
1396 
1397  /* if qtmin is far too low, p[k] can become extremely large, exceeding
1398  * even double precision range. the following check should prevent overflows */
1399  /* >>chng 01 nov 07, sqrt(DBL_MAX) -> sqrt(DBL_MAX/100.) so that sqrt(p[k]*p[k+1]) is safe */
1400  if( p[k] > sqrt(DBL_MAX/100.) )
1401  {
1402  *ErrorCode = MAX2(*ErrorCode,QH_LOW_FAIL);
1403  break;
1404 
1405 #if 0
1406  /* >>chng 01 apr 21, change DBL_MAX -> DBL_MAX/100. to work around
1407  * a bug in the Compaq C compiler V6.3-025 when invoked with -fast, PvH */
1408  for( j=0; j <= k; j++ )
1409  {
1410  p[j] /= DBL_MAX/100.;
1411  dPdlnT[j] /= DBL_MAX/100.;
1412  }
1413  RadCooling /= DBL_MAX/100.;
1414 #endif
1415  }
1416 
1417  /* this may catch a bug in the Compaq C compiler V6.3-025
1418  * if this gets triggered, try compiling with -ieee */
1419  ASSERT( p[k] > 0. && dPdlnT[k] > 0. && RadCooling > 0. );
1420 
1421  /* do a check for negative slope and if there will be enough room to store results */
1422  /* >>chng 02 jan 30, do this test only every NQTEST steps, PvH */
1423  /* if( k >= MIN2(NQTEST,NQGRID-2) ) */
1424  if( k > 0 && k%NQTEST == 0 )
1425  {
1426  double wid, avStep, factor;
1427  /* >>chng 02 jul 31, do additional test for HIGH_FAIL,
1428  * first test before main loop doesn't catch everything, PvH */
1429  if( lgAllNegSlope )
1430  {
1431  *ErrorCode = MAX2(*ErrorCode,QH_HIGH_FAIL);
1432  break;
1433  }
1434 
1435  /* this is a lower limit for the total width of the probability distr */
1436  /* >>chng 02 jan 30, corrected calculation of wid and avStep, PvH */
1437  wid = (sqrt(-log(PROB_CUTOFF_LO)/(4.*LN_TWO)) +
1438  sqrt(-log(PROB_CUTOFF_HI)/(4.*LN_TWO)))*fwhm/Umax;
1439  avStep = log(u1[k]/u1[0])/(double)k;
1440  /* make an estimate for the number of steps needed */
1441  /* >>chng 02 jan 30, included factor 1.5 because stepsize increases near peak, PvH */
1442  /* >>chng 02 aug 06, changed 1.5 to sliding scale because of laqheat2.in test, PvH */
1443  factor = 1.1 + 3.9*(1.0 - sqrt((double)k/(double)NQGRID));
1444  if( wid/avStep > factor*(double)NQGRID )
1445  {
1446  *ErrorCode = MAX2(*ErrorCode,QH_ARRAY_FAIL);
1447  break;
1448  }
1449  }
1450 
1451  /* if we run out of room to store results, do regular break
1452  * the code below will sort out if integration is valid or not */
1453  if( k >= NQGRID-2 )
1454  {
1455  *ErrorCode = MAX2(*ErrorCode,QH_ARRAY_FAIL);
1456  break;
1457  }
1458 
1459  /* force thermal equilibrium of the grains */
1460  fac = RadCooling*gv.bin[nd]->cnv_GR_pCM3*EN1RYD/gv.bin[nd]->GrainHeat;
1461 
1462  /* this is regular stop criterion */
1463  if( dPdlnT[k] < dPdlnT[k-1] && dPdlnT[k]/fac < PROB_CUTOFF_HI )
1464  {
1465  break;
1466  }
1467  }
1468 
1469  if( l == MAX_LOOP )
1470  *ErrorCode = MAX2(*ErrorCode,QH_LOOP_FAIL);
1471 
1472  nok = k;
1473  nbin = k+1;
1474 
1475  /* there are insufficient bins to attempt rebinning */
1476  if( *ErrorCode < QH_NO_REBIN && nbin < NQMIN )
1477  *ErrorCode = MAX2(*ErrorCode,QH_NBIN_FAIL);
1478 
1479  /* >>chng 02 aug 07, do some basic checks on the distribution first */
1480  if( *ErrorCode < QH_NO_REBIN )
1481  ScanProbDistr(u1,dPdlnT,nbin,maxVal,nmax,&nstart,&nstart2,&nend,nWideFail,ErrorCode);
1482 
1483  if( *ErrorCode >= QH_NO_REBIN )
1484  {
1485  return;
1486  }
1487 
1488  for( j=0; j < nbin; j++ )
1489  {
1490  p[j] /= fac;
1491  dPdlnT[j] /= fac;
1492  }
1493  RadCooling /= fac;
1494 
1495  /* >>chng 02 aug 08, moved RebinQHeatResults from here further down, this improves new_tmin estimate */
1496  *new_tmin = 0.;
1497  for( j=nstart; j < nbin; j++ )
1498  {
1499  if( dPdlnT[j] < PROB_CUTOFF_LO )
1500  {
1501  *new_tmin = qtemp[j];
1502  }
1503  else
1504  {
1505  if( j == nstart )
1506  {
1507  if( dPdlnT[j] > SAFETY*PROB_CUTOFF_LO )
1508  *ErrorCode = MAX2(*ErrorCode,QH_BOUND_FAIL);
1509 
1510  /* >>chng 02 aug 11, use nstart2 for more reliable extrapolation */
1511  if( dPdlnT[nstart2] < 0.999*dPdlnT[nstart2+NSTARTUP] )
1512  {
1513  /* >>chng 02 aug 09, new formula for extrapolating new_tmin, PvH */
1514  /* this assumes that at low temperatures the behaviour
1515  * is as follows: dPdlnT(T) = C1 * exp( -C2/T**3 ) */
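 /* with this ansatz log(dPdlnT(T2)/dPdlnT(T1)) = C2*(1/T1^3 - 1/T2^3), so the
  * two sampled points yield c2 = C2; requiring dPdlnT(new_tmin) = PROB_CUTOFF_LO
  * then gives C2/new_tmin^3 = C2/T1^3 + log(dPdlnT(T1)/PROB_CUTOFF_LO) = help,
  * i.e. new_tmin = (c2/help)^(1/3) as coded below */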
1516  double T1 = qtemp[nstart2];
1517  double T2 = qtemp[nstart2+NSTARTUP];
1518  double delta_y = log(dPdlnT[nstart2+NSTARTUP]/dPdlnT[nstart2]);
1519  double c2 = delta_y/(1./POW3(T1)-1./POW3(T2));
1520  double help = c2/POW3(T1) + log(dPdlnT[nstart2]/PROB_CUTOFF_LO);
1521  *new_tmin = pow(c2/help,1./3.);
1522  }
1523 
1524  /* >>chng 04 nov 09, in case of lower bound failure, assure qtmin is lowered, PvH */
1525  if( dPdlnT[j] > SAFETY*PROB_CUTOFF_LO && *new_tmin >= qtmin1 )
1526  {
1527  double delta_x = log(qtemp[nstart2+NSTARTUP]/qtemp[nstart2]);
1528  double delta_y = log(dPdlnT[nstart2+NSTARTUP]/dPdlnT[nstart2]);
1529  delta_x *= log(PROB_CUTOFF_LO/dPdlnT[nstart2])/delta_y;
1530  *new_tmin = qtemp[nstart2]*exp(delta_x);
1531  if( *new_tmin < qtmin1 )
1532  /* in general this estimate will be too low -> use geometric mean */
1533  *new_tmin = sqrt( *new_tmin*qtmin1 );
1534  else
1535  /* last resort... */
1536  *new_tmin = qtmin1/DEF_FAC;
1537  }
1538  }
1539  break;
1540  }
1541  }
1542  *new_tmin = MAX3(*new_tmin,qtmin1/DEF_FAC,GRAIN_TMIN);
1543 
1544  ASSERT( *new_tmin < gv.bin[nd]->tedust );
1545 
1546  /* >>chng 02 jan 30, prevent excessive looping when prob distribution simply won't fit, PvH */
1547  if( dPdlnT[nbin-1] > SAFETY*PROB_CUTOFF_HI )
1548  {
1549  if( *ErrorCode == QH_ARRAY_FAIL || *ErrorCode == QH_LOOP_FAIL )
1550  {
1551  ++(*nWideFail);
1552 
1553  if( *nWideFail < WIDE_FAIL_MAX )
1554  {
1555  /* this indicates that low end was OK, but we ran out of room
1556  * to store the high end -> try GetProbDistr_HighLimit instead */
1557  *ErrorCode = MAX2(*ErrorCode,QH_DELTA_FAIL);
1558  }
1559  else
1560  {
1561  *ErrorCode = MAX2(*ErrorCode,QH_WIDE_FAIL);
1562  }
1563  }
1564  else
1565  {
1566  *ErrorCode = MAX2(*ErrorCode,QH_BOUND_FAIL);
1567  }
1568  }
1569 
1570  /* >>chng 01 may 11, rebin the quantum heating results
1571  *
1572  * for grains in intense radiation fields, the code needs high resolution for stability
1573  * and therefore produces lots of small bins, even though the grains never make large
1574  * excursions from the equilibrium temperature; adding in the resulting spectra in RT_diffuse
1575  * takes up an excessive amount of CPU time where most CPU is spent on grains for which
1576  * the quantum treatment matters least, and moreover on temperature bins with very low
1577  * probability; rebinning the results on a coarser grid should help reduce the overhead */
1578  /* >>chng 02 aug 07, use nstart and nend while rebinning */
1579 
1580  nbin = RebinQHeatResults(nd,nstart,nend,p,qtemp,qprob,dPdlnT,u1,delu,Lambda,ErrorCode);
1581 
1582  /* >>chng 01 jul 13, add fail-safe for failure in RebinQHeatResults */
1583  if( nbin == 0 )
1584  {
1585  return;
1586  }
1587 
1588  *qnbin = nbin;
1589 
1590  sum = 0.;
1591  for( j=0; j < nbin; j++ )
1592  {
1593  sum += qprob[j];
1594  }
1595 
1596  /* the fact that the probability normalization fails probably indicates
1597  * that the distribution is too close to a delta function to resolve */
1598  if( fabs(sum-1.) > PROB_TOL )
1599  *ErrorCode = MAX2(*ErrorCode,QH_CONV_FAIL);
1600 
1601  if( trace.lgTrace && trace.lgDustBug )
1602  {
1603  fprintf( ioQQQ,
1604  " zone %ld %s nbin %ld nok %ld nbad %ld total prob %.4e rel_tol %.3e new_tmin %.4e\n",
1605  nzone,gv.bin[nd]->chDstLab,nbin,nok,nbad,sum,rel_tol,*new_tmin );
1606  }
1607  return;
1608 }
1609 
1610 
1611 /* try two consecutive integration steps using stepsize "step/2." (yielding p[k]),
1612  * and also one double integration step using stepsize "step" (yielding p2k).
1613  * the difference fabs(p2k-p[k])/(3.*p[k]) can be used to estimate the relative
1614  * accuracy of p[k] and will be used to adapt the stepsize to an optimal value */
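/* the division by 3 is the usual Richardson-type estimate for a method with local
 * error scaling as step^3: two half steps accumulate an error of about
 * 2*C*(step/2)^3 = C*step^3/4, one full step about C*step^3, so their difference
 * is roughly 3 times the error remaining in the more accurate half-step result */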
1615 STATIC double TryDoubleStep(double u1[],
1616  double delu[],
1617  double p[],
1618  double qtemp[],
1619  double Lambda[],
1620  double Phi[],
1621  double PhiDrv[],
1622  double step,
1623  /*@out@*/ double *cooling,
1624  long index,
1625  long nd,
1626  /*@out@*/ bool *lgBoundFail)
1627 {
1628  long i,
1629  j,
1630  jlo,
1631  k=-1000;
1632  double bval_jk,
1633  cooling2,
1634  p2k,
1635  p_max,
1636  RelErrCool,
1637  RelErrPk,
1638  sum,
1639  sum2 = -DBL_MAX,
1640  trap1,
1641  trap12 = -DBL_MAX,
1642  trap2,
1643  uhi,
1644  ulo,
1645  umin,
1646  y;
1647 
1648  DEBUG_ENTRY( "TryDoubleStep()" );
1649 
1650  /* sanity checks */
1651  ASSERT( index >= 0 && index < NQGRID-2 && nd >= 0 && nd < gv.nBin && step > 0. );
1652 
1653  ulo = rfield.anu[0];
1654  /* >>chng 01 nov 29, rfield.nflux -> gv.qnflux, PvH */
1655  /* >>chng 03 jan 26, gv.qnflux -> gv.bin[nd]->qnflux, PvH */
1656  uhi = rfield.anu[gv.bin[nd]->qnflux-1];
1657 
1658  /* >>chng 01 nov 21, skip initial bins if they have very low probability */
1659  p_max = 0.;
1660  for( i=0; i <= index; i++ )
1661  p_max = MAX2(p_max,p[i]);
1662  jlo = 0;
1663  while( p[jlo] < PROB_CUTOFF_LO*p_max )
1664  jlo++;
1665 
1666  for( i=1; i <= 2; i++ )
1667  {
1668  bool lgErr;
1669  long ipLo = 0;
1670  long ipHi = gv.bin[nd]->qnflux-1;
1671  k = index + i;
1672  delu[k] = step/2.;
1673  u1[k] = u1[k-1] + delu[k];
1674  qtemp[k] = inv_ufunct(u1[k],nd,lgBoundFail);
1675  splint_safe(gv.dsttmp,gv.bin[nd]->dstems,gv.bin[nd]->dstslp2,NDEMS,log(qtemp[k]),&y,&lgErr);
1676  *lgBoundFail = *lgBoundFail || lgErr;
1677  Lambda[k] = exp(y)*gv.bin[nd]->cnv_H_pGR/EN1RYD;
1678 
1679  sum = sum2 = 0.;
1680  trap1 = trap2 = trap12 = 0.;
1681 
1682  /* this loop uses up a large fraction of the total CPU time, it should be well optimized */
1683  for( j=jlo; j < k; j++ )
1684  {
1685  umin = u1[k] - u1[j];
1686 
1687  if( umin >= uhi )
1688  {
1689  /* for increasing j, umin will decrease monotonically. If ( umin > uhi ) holds for
1690  * the current value of j, it must have held for the previous value as well. Hence
1691  * both trap1 and trap2 are zero at this point and we would only be adding zero
1692  * to sum. Therefore we skip this step to save CPU time */
1693  continue;
1694  }
1695  else if( umin > ulo )
1696  {
1697  /* do a bisection search such that rfield.anu[ipLo] <= umin < rfield.anu[ipHi]
1698  * ipoint doesn't always give the correct index into anu (it works for AnuOrg)
1699  * bisection search is also faster, which is important here to save CPU time.
1700  * on the first iteration ipLo equals 0 and the first while loop will be skipped;
1701  * after that umin is monotonically decreasing, and ipHi is retained from the
1702  * previous iteration since it is a valid upper limit; ipLo will equal ipHi-1 */
1703  long ipStep = 1;
1704  /* >>chng 03 feb 03 rjrw: hunt for lower bracket */
1705  while( rfield.anu[ipLo] > (realnum)umin )
1706  {
1707  ipHi = ipLo;
1708  ipLo -= ipStep;
1709  if( ipLo <= 0 )
1710  {
1711  ipLo = 0;
1712  break;
1713  }
1714  ipStep *= 2;
1715  }
1716  /* now do regular bisection search */
1717  while( ipHi-ipLo > 1 )
1718  {
1719  long ipMd = (ipLo+ipHi)/2;
1720  if( rfield.anu[ipMd] > (realnum)umin )
1721  ipHi = ipMd;
1722  else
1723  ipLo = ipMd;
1724  }
1725  bval_jk = Phi[ipLo] + (umin - rfield.anu[ipLo])*PhiDrv[ipLo];
1726  }
1727  else
1728  {
1729  bval_jk = Phi[0];
1730  }
1731 
1732  /* these two quantities are needed to take double step from index -> index+2 */
1733  trap12 = trap1;
1734  sum2 = sum;
1735 
1736  /* bval_jk*gv.bin[nd]->cnv_CM3_pGR is the total excitation rate from j to k and
1737  * higher due to photon absorptions and particle collisions, it already implements
1738  * Eq. 2.17 of Guhathakurta & Draine, in events/grain/s */
1739  /* >>chng 00 mar 27, factored in hden (in Phi), by PvH */
1740  /* >>chng 00 mar 29, add in contribution due to particle collisions, by PvH */
1741  /* >>chng 01 mar 30, moved multiplication of bval_jk with gv.bin[nd]->cnv_CM3_pGR
1742  * out of loop, PvH */
1743  trap2 = p[j]*bval_jk;
1744  /* Trapezoidal rule, multiplication with factor 0.5 is done outside loop */
1745  sum += (trap1 + trap2)*delu[j];
1746  trap1 = trap2;
1747  }
1748 
1749  /* >>chng 00 mar 27, multiplied with delu, by PvH */
1750  /* >>chng 00 apr 05, taken out Lambda[0], improves convergence at low end dramatically!, by PvH */
1751  /* qprob[k] = sum*gv.bin[nd]->cnv_CM3_pGR*delu[k]/(Lambda[k] - Lambda[0]); */
1752  /* this expression includes factor 0.5 from trapezoidal rule above */
1753  /* p[k] = 0.5*(sum + (trap1 + p[k]*Phi[0])*delu[k])/Lambda[k] */
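 /* the end point of the trapezoidal sum (j = k, where umin = 0 and bval = Phi[0])
  * involves p[k] itself; solving the implicit relation in the comment above,
  * 2.*Lambda[k]*p[k] = sum + (trap1 + p[k]*Phi[0])*delu[k], for p[k] gives the
  * explicit expression used below */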
1754  p[k] = (sum + trap1*delu[k])/(2.*Lambda[k] - Phi[0]*delu[k]);
1755 
1756  // total failure -> force next step to be smaller
1757  if( p[k] <= 0. )
1758  return 3.*QHEAT_TOL;
1759  }
1760 
1761  /* this is estimate for p[k] using one double step of size "step" */
1762  p2k = (sum2 + trap12*step)/(2.*Lambda[k] - Phi[0]*step);
1763 
1764  // total failure -> force next step to be smaller
1765  if( p2k <= 0. )
1766  return 3.*QHEAT_TOL;
1767 
1768  RelErrPk = fabs(p2k-p[k])/p[k];
1769 
1770  /* this is radiative cooling due to the two probability bins we just added */
1771  /* simple trapezoidal rule will not do here, RelErrCool would never converge */
1772  *cooling = log_integral(u1[k-2],p[k-2]*Lambda[k-2],u1[k-1],p[k-1]*Lambda[k-1]);
1773  *cooling += log_integral(u1[k-1],p[k-1]*Lambda[k-1],u1[k],p[k]*Lambda[k]);
1774 
1775  /* same as cooling, but now for double step of size "step" */
1776  cooling2 = log_integral(u1[k-2],p[k-2]*Lambda[k-2],u1[k],p2k*Lambda[k]);
1777 
1778  /* p[0] is not reliable, so ignore convergence test on cooling on first step */
1779  RelErrCool = ( index > 0 ) ? fabs(cooling2-(*cooling))/(*cooling) : 0.;
1780 
1781 /* printf( " TryDoubleStep p[k-1] %.4e p[k] %.4e p2k %.4e\n",p[k-1],p[k],p2k ); */
1782  /* error scales as O(step^3), so this is relative accuracy of p[k] or cooling */
1783  return MAX2(RelErrPk,RelErrCool)/3.;
1784 }
1785 
1786 
1787 /* calculate logarithmic integral from (xx1,yy1) to (xx2,yy2) */
1788 STATIC double log_integral(double xx1,
1789  double yy1,
1790  double xx2,
1791  double yy2)
1792 {
1793  double eps,
1794  result,
1795  xx;
1796 
1797  DEBUG_ENTRY( "log_integral()" );
1798 
1799  /* sanity checks */
1800  ASSERT( xx1 > 0. && yy1 > 0. && xx2 > 0. && yy2 > 0. );
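 /* this assumes y(x) behaves as a power law between the two points,
  * y = yy1*(x/xx1)^s with s = log(yy2/yy1)/log(xx2/xx1); then
  * Int y dx = (xx2*yy2 - xx1*yy1)/(s+1), and (s+1) = eps/xx with the definitions
  * below, which gives the general branch; the series branch expands
  * (exp(eps)-1)/eps for small eps to avoid cancellation when xx2*yy2 is close
  * to xx1*yy1. As an illustration, log_integral(1.,2.,4.,16.) treats y as
  * 2*x^1.5 and returns (64.-2.)*log(4.)/log(32.) = 24.8, the exact integral */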
1801 
1802  xx = log(xx2/xx1);
1803  eps = log((xx2/xx1)*(yy2/yy1));
1804  if( fabs(eps) < 1.e-4 )
1805  {
1806  result = xx1*yy1*xx*((eps/6. + 0.5)*eps + 1.);
1807  }
1808  else
1809  {
1810  result = (xx2*yy2 - xx1*yy1)*xx/eps;
1811  }
1812  return result;
1813 }
1814 
1815 
1816 /* scan the probability distribution for valid range */
1817 STATIC void ScanProbDistr(double u1[], /* u1[nbin] */
1818  double dPdlnT[], /* dPdlnT[nbin] */
1819  long nbin,
1820  double maxVal,
1821  long nmax,
1822  /*@out@*/long *nstart,
1823  /*@out@*/long *nstart2,
1824  /*@out@*/long *nend,
1825  long *nWideFail,
1826  QH_Code *ErrorCode)
1827 {
1828  bool lgSetLo,
1829  lgSetHi;
1830  long i;
1831  double deriv_max,
1832  minVal;
1833  const double MIN_FAC_LO = 1.e4;
1834  const double MIN_FAC_HI = 1.e4;
1835 
1836  DEBUG_ENTRY( "ScanProbDistr()" );
1837 
1838  /* sanity checks */
1839  ASSERT( nbin > 0 && nmax >= 0 && nmax < nbin && maxVal > 0. );
1840 
1841  /* sometimes the probability distribution will start falling before settling on
1842  * a rising slope. In such a case nstart will point to the first rising point,
1843  * while nstart2 will point to the point with the steepest derivative on the
1844  * rising slope. The code will use the distribution from nstart to nend as a
1845  * valid probability distribution, but it will use the points near nstart2
1846  * to extrapolate a new value for qtmin if needed */
1847  minVal = maxVal;
1848  *nstart = nmax;
1849  for( i=nmax; i >= 0; --i )
1850  {
1851  if( dPdlnT[i] < minVal )
1852  {
1853  *nstart = i;
1854  minVal = dPdlnT[i];
1855  }
1856  }
1857  deriv_max = 0.;
1858  *nstart2 = nmax;
1859  for( i=nmax; i > *nstart; --i )
1860  {
1861  double deriv = log(dPdlnT[i]/dPdlnT[i-1])/log(u1[i]/u1[i-1]);
1862  if( deriv > deriv_max )
1863  {
1864  *nstart2 = i-1;
1865  deriv_max = deriv;
1866  }
1867  }
1868  *nend = nbin-1;
1869 
1870  /* now do quality control; these checks are more stringent than the ones in GetProbDistr_LowLimit */
1871  lgSetLo = ( nmax >= *nend || maxVal/dPdlnT[*nend] < MIN_FAC_HI );
1872  /* >>chng 03 jan 22, prevent problems if both dPdlnT and its derivative are continuously rising,
1873  * in which case both lgSetLo and lgSetHi are set and QH_WIDE_FAIL is triggered;
1874  * this can happen if qtmin is far too low; triggered by pahtest.in, PvH */
1875  if( lgSetLo )
1876  /* use relaxed test if lgSetLo is already set */
1877  lgSetHi = ( nmax <= *nstart || maxVal/dPdlnT[*nstart] < MIN_FAC_LO );
1878  else
1879  lgSetHi = ( nmax <= *nstart2 || maxVal/dPdlnT[*nstart2] < MIN_FAC_LO );
1880 
1881  if( lgSetLo && lgSetHi )
1882  {
1883  ++(*nWideFail);
1884 
1885  if( *nWideFail >= WIDE_FAIL_MAX )
1886  *ErrorCode = MAX2(*ErrorCode,QH_WIDE_FAIL);
1887  }
1888 
1889  if( lgSetLo )
1890  *ErrorCode = MAX2(*ErrorCode,QH_LOW_FAIL);
1891 
1892  if( lgSetHi )
1893  *ErrorCode = MAX2(*ErrorCode,QH_HIGH_FAIL);
1894 
1895  /* there are insufficient bins to attempt rebinning */
1896  if( *ErrorCode < QH_NO_REBIN && (*nend - *nstart) < NQMIN )
1897  *ErrorCode = MAX2(*ErrorCode,QH_NBIN_FAIL);
1898 
1899  if( trace.lgTrace && trace.lgDustBug )
1900  {
1901  fprintf( ioQQQ, " ScanProbDistr nstart %ld nstart2 %ld nend %ld nmax %ld maxVal %.3e",
1902  *nstart,*nstart2,*nend,nmax,maxVal );
1903  fprintf( ioQQQ, " dPdlnT[nstart] %.3e dPdlnT[nstart2] %.3e dPdlnT[nend] %.3e code %d\n",
1904  dPdlnT[*nstart],dPdlnT[*nstart2],dPdlnT[*nend],*ErrorCode );
1905  }
1906 
1907  if( *ErrorCode >= QH_NO_REBIN )
1908  {
1909  *nstart = -1;
1910  *nstart2 = -1;
1911  *nend = -2;
1912  }
1913  return;
1914 }
1915 
1916 
1917 /* rebin the quantum heating results to speed up RT_diffuse */
1918 STATIC long RebinQHeatResults(long nd,
1919  long nstart,
1920  long nend,
1921  double p[],
1922  double qtemp[],
1923  double qprob[],
1924  double dPdlnT[],
1925  double u1[],
1926  double delu[],
1927  double Lambda[],
1928  QH_Code *ErrorCode)
1929 {
1930  long i,
1931  newnbin;
1932  double fac,
1933  help,
1934  mul_fac,
1935  *new_delu/*[NQGRID]*/,
1936  *new_dPdlnT/*[NQGRID]*/,
1937  *new_Lambda/*[NQGRID]*/,
1938  *new_p/*[NQGRID]*/,
1939  *new_qprob/*[NQGRID]*/,
1940  *new_qtemp/*[NQGRID]*/,
1941  *new_u1/*[NQGRID]*/,
1942  PP1,
1943  PP2,
1944  RadCooling,
1945  T1,
1946  T2,
1947  Ucheck,
1948  uu1,
1949  uu2;
1950 
1951  DEBUG_ENTRY( "RebinQHeatResults()" );
1952 
1953  /* sanity checks */
1954  ASSERT( nd >= 0 && nd < gv.nBin );
1955  /* >>chng 02 aug 07, changed oldnbin -> nstart..nend */
1956  ASSERT( nstart >= 0 && nstart < nend && nend < NQGRID );
1957 
1958  /* leading entries may have become very small or zero -> skip */
1959  for( i=nstart; i <= nend && dPdlnT[i] < PROB_CUTOFF_LO; i++ ) {}
1960 
1961  /* >>chng 04 oct 17, add fail-safe to keep lint happy, but this should never happen... */
1962  if( i >= NQGRID )
1963  {
1964  *ErrorCode = MAX2(*ErrorCode,QH_REBIN_FAIL);
1965  return 0;
1966  }
1967 
1968  /* >>chng 02 aug 15, use malloc to prevent stack overflows */
1969  new_delu = (double*)MALLOC((size_t)(NQGRID*sizeof(double)));
1970  new_dPdlnT = (double*)MALLOC((size_t)(NQGRID*sizeof(double)));
1971  new_Lambda = (double*)MALLOC((size_t)(NQGRID*sizeof(double)));
1972  new_p = (double*)MALLOC((size_t)(NQGRID*sizeof(double)));
1973  new_qprob = (double*)MALLOC((size_t)(NQGRID*sizeof(double)));
1974  new_qtemp = (double*)MALLOC((size_t)(NQGRID*sizeof(double)));
1975  new_u1 = (double*)MALLOC((size_t)(NQGRID*sizeof(double)));
1976 
1977  newnbin = 0;
1978 
1979  T1 = qtemp[i];
1980  PP1 = p[i];
1981  uu1 = u1[i];
1982 
1983  /* >>chng 04 feb 01, change 2.*NQMIN -> 1.5*NQMIN, PvH */
1984  help = pow(qtemp[nend]/qtemp[i],1./(1.5*NQMIN));
1985  mul_fac = MIN2(QT_RATIO,help);
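 /* the temperature ratio per rebinned cell is the smaller of QT_RATIO and the
  * ratio that would divide the remaining temperature range into 1.5*NQMIN equal
  * logarithmic steps; this way at least of order 1.5*NQMIN cells are retained,
  * while the many fine bins produced for narrow distributions get merged */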
1986 
1987  Ucheck = u1[i];
1988  RadCooling = 0.;
1989 
1990  while( i < nend )
1991  {
1992  bool lgBoundErr;
1993  bool lgDone= false;
1994  double s0 = 0.;
1995  double s1 = 0.;
1996  double wid = 0.;
1997  double xx,y;
1998 
1999  T2 = T1*mul_fac;
2000 
2001  do
2002  {
2003  double p1,p2,L1,L2,frac,slope;
2004  if( qtemp[i] <= T1 && T1 <= qtemp[i+1] )
2005  {
2006  /* >>chng 01 nov 15, copy uu2 into uu1 instead, PvH */
2007  /* uu1 = ufunct(T1,nd); */
2008  frac = log(T1/qtemp[i]);
2009  slope = log(p[i+1]/p[i])/log(qtemp[i+1]/qtemp[i]);
2010  p1 = p[i]*exp(frac*slope);
2011  slope = log(Lambda[i+1]/Lambda[i])/log(qtemp[i+1]/qtemp[i]);
2012  L1 = Lambda[i]*exp(frac*slope);
2013  }
2014  else
2015  {
2016  /* >>chng 01 nov 15, copy uu2 into uu1 instead, PvH */
2017  /* uu1 = u1[i]; */
2018  p1 = p[i];
2019  L1 = Lambda[i];
2020  }
2021  if( qtemp[i] <= T2 && T2 <= qtemp[i+1] )
2022  {
2023  /* >>chng 02 apr 30, make sure this doesn't point beyond valid range, PvH */
2024  help = ufunct(T2,nd,&lgBoundErr);
2025  uu2 = MIN2(help,u1[i+1]);
2026  ASSERT( !lgBoundErr ); /* this should never be triggered */
2027  frac = log(T2/qtemp[i]);
2028  slope = log(p[i+1]/p[i])/log(qtemp[i+1]/qtemp[i]);
2029  p2 = p[i]*exp(frac*slope);
2030  slope = log(Lambda[i+1]/Lambda[i])/log(qtemp[i+1]/qtemp[i]);
2031  L2 = Lambda[i]*exp(frac*slope);
2032  lgDone = true;
2033  }
2034  else
2035  {
2036  uu2 = u1[i+1];
2037  p2 = p[i+1];
2038  L2 = Lambda[i+1];
2039  /* >>chng 01 nov 15, this caps the range in p(U) integrated in one bin
2040  * it helps avoid spurious QH_BOUND_FAIL's when flank is very steep, PvH */
2041  if( MAX2(p2,PP1)/MIN2(p2,PP1) > sqrt(SAFETY) )
2042  {
2043  lgDone = true;
2044  T2 = qtemp[i+1];
2045  }
2046  ++i;
2047  }
2048  PP2 = p2;
2049  wid += uu2 - uu1;
2050  /* sanity check */
2051  ASSERT( wid >= 0. );
2052  s0 += log_integral(uu1,p1,uu2,p2);
2053  s1 += log_integral(uu1,p1*L1,uu2,p2*L2);
2054  uu1 = uu2;
2055 
2056  } while( i < nend && ! lgDone );
2057 
2058  /* >>chng 01 nov 14, if T2 == qtemp[oldnbin-1], the code will try another iteration
2059  * break here to avoid zero divide, the assert on Ucheck tests if we are really finished */
2060  /* >>chng 01 dec 04, change ( s0 == 0. ) to ( s0 <= 0. ), PvH */
2061  if( s0 <= 0. )
2062  {
2063  ASSERT( wid == 0. );
2064  break;
2065  }
2066 
2067  new_qprob[newnbin] = s0;
2068  new_Lambda[newnbin] = s1/s0;
2069  xx = log(new_Lambda[newnbin]*EN1RYD*gv.bin[nd]->cnv_GR_pH);
2070  splint_safe(gv.bin[nd]->dstems,gv.dsttmp,gv.bin[nd]->dstslp,NDEMS,xx,&y,&lgBoundErr);
2071  ASSERT( !lgBoundErr ); /* this should never be triggered */
2072  new_qtemp[newnbin] = exp(y);
2073  new_u1[newnbin] = ufunct(new_qtemp[newnbin],nd,&lgBoundErr);
2074  ASSERT( !lgBoundErr ); /* this should never be triggered */
2075  new_delu[newnbin] = wid;
2076  new_p[newnbin] = new_qprob[newnbin]/new_delu[newnbin];
2077  new_dPdlnT[newnbin] = new_p[newnbin]*new_qtemp[newnbin]*uderiv(new_qtemp[newnbin],nd);
2078 
2079  Ucheck += wid;
2080  RadCooling += new_qprob[newnbin]*new_Lambda[newnbin];
2081 
2082  T1 = T2;
2083  PP1 = PP2;
2084  ++newnbin;
2085  }
2086 
2087  /* >>chng 01 jul 13, add fail-safe */
2088  if( newnbin < NQMIN )
2089  {
2090  *ErrorCode = MAX2(*ErrorCode,QH_REBIN_FAIL);
2091 
2092  free(new_delu);
2093  free(new_dPdlnT);
2094  free(new_Lambda);
2095  free(new_p);
2096  free(new_qprob);
2097  free(new_qtemp);
2098  free(new_u1);
2099  return 0;
2100  }
2101 
2102  fac = RadCooling*EN1RYD*gv.bin[nd]->cnv_GR_pCM3/gv.bin[nd]->GrainHeat;
2103 
2104  if( trace.lgTrace && trace.lgDustBug )
2105  {
2106  fprintf( ioQQQ, " RebinQHeatResults found tol1 %.4e tol2 %.4e\n",
2107  fabs(u1[nend]/Ucheck-1.), fabs(fac-1.) );
2108  }
2109 
2110  /* do quality control */
2111  /* >>chng 02 apr 30, tighten up check, PvH */
2112  ASSERT( fabs(u1[nend]/Ucheck-1.) < 10.*sqrt((double)(nend-nstart+newnbin))*DBL_EPSILON );
2113 
2114  if( fabs(fac-1.) > CONSERV_TOL )
2115  *ErrorCode = MAX2(*ErrorCode,QH_CONV_FAIL);
2116 
2117  for( i=0; i < newnbin; i++ )
2118  {
2119  /* renormalize the distribution to assure energy conservation */
2120  p[i] = new_p[i]/fac;
2121  qtemp[i] = new_qtemp[i];
2122  qprob[i] = new_qprob[i]/fac;
2123  dPdlnT[i] = new_dPdlnT[i]/fac;
2124  u1[i] = new_u1[i];
2125  delu[i] = new_delu[i];
2126  Lambda[i] = new_Lambda[i];
2127 
2128  /* sanity checks */
2129  ASSERT( qtemp[i] > 0. && qprob[i] > 0. );
2130 
2131 /* printf(" rk %ld T[k] %.6e U[k] %.6e p[k] %.6e dPdlnT[k] %.6e\n",i,qtemp[i],u1[i],p[i],dPdlnT[i]); */
2132  }
2133 
2134  free(new_delu);
2135  free(new_dPdlnT);
2136  free(new_Lambda);
2137  free(new_p);
2138  free(new_qprob);
2139  free(new_qtemp);
2140  free(new_u1);
2141  return newnbin;
2142 }
2143 
2144 
2145 /* calculate approximate probability distribution in high intensity limit */
2146 STATIC void GetProbDistr_HighLimit(long nd,
2147  double TolFac,
2148  double Umax,
2149  double fwhm,
2150  /*@out@*/double qtemp[],
2151  /*@out@*/double qprob[],
2152  /*@out@*/double dPdlnT[],
2153  /*@out@*/double *tol,
2154  /*@out@*/long *qnbin,
2155  /*@out@*/double *new_tmin,
2156  /*@out@*/QH_Code *ErrorCode)
2157 {
2158  bool lgBoundErr,
2159  lgErr;
2160  long i,
2161  nbin;
2162  double c1,
2163  c2,
2164  delu[NQGRID],
2165  fac,
2166  fac1,
2167  fac2,
2168  help1,
2169  help2,
2170  L1,
2171  L2,
2172  Lambda[NQGRID],
2173  mul_fac,
2174  p[NQGRID],
2175  p1,
2176  p2,
2177  RadCooling,
2178  sum,
2179  T1,
2180  T2,
2181  Tlo,
2182  Thi,
2183  Ulo,
2184  Uhi,
2185  uu1,
2186  uu2,
2187  xx,
2188  y;
2189 
2190  DEBUG_ENTRY( "GetProbDistr_HighLimit()" );
2191 
2192  if( trace.lgTrace && trace.lgDustBug )
2193  {
2194  fprintf( ioQQQ, " GetProbDistr_HighLimit called for grain %s\n", gv.bin[nd]->chDstLab );
2195  }
2196 
2197  c1 = sqrt(4.*LN_TWO/PI)/fwhm*exp(-POW2(fwhm/Umax)/(16.*LN_TWO));
2198  c2 = 4.*LN_TWO*POW2(Umax/fwhm);
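 /* the analytic approximation is a log-normal distribution in enthalpy,
  * p(U) = c1*exp(-c2*POW2(log(U/Umax))), i.e. a Gaussian in log(U/Umax) with
  * full width at half maximum fwhm/Umax; c1 and c2 are chosen such that
  * Int p(U) dU = 1, the exponential factor in c1 compensating for the Jacobian
  * dU = U dln(U) */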
2199 
2200  fac1 = fwhm/Umax*sqrt(-log(PROB_CUTOFF_LO)/(4.*LN_TWO));
2201  /* >>chng 03 nov 10, safeguard against underflow, PvH */
2202  help1 = Umax*exp(-fac1);
2203  help2 = exp(gv.bin[nd]->DustEnth[0]);
2204  Ulo = MAX2(help1,help2);
2205  /* >>chng 03 jan 28, ignore lgBoundErr on lower boundary, low-T grains have negligible emission, PvH */
2206  Tlo = inv_ufunct(Ulo,nd,&lgBoundErr);
2207 
2208  fac2 = fwhm/Umax*sqrt(-log(PROB_CUTOFF_HI)/(4.*LN_TWO));
2209  /* >>chng 03 nov 10, safeguard against overflow, PvH */
2210  if( fac2 > log(DBL_MAX/10.) )
2211  {
2212  *ErrorCode = MAX2(*ErrorCode,QH_WIDE_FAIL);
2213  return;
2214  }
2215  Uhi = Umax*exp(fac2);
2216  Thi = inv_ufunct(Uhi,nd,&lgBoundErr);
2217 
2218  nbin = 0;
2219 
2220  T1 = Tlo;
2221  uu1 = ufunct(T1,nd,&lgErr);
2222  lgBoundErr = lgBoundErr || lgErr;
2223  help1 = log(uu1/Umax);
2224  p1 = c1*exp(-c2*POW2(help1));
2225  splint_safe(gv.dsttmp,gv.bin[nd]->dstems,gv.bin[nd]->dstslp2,NDEMS,log(T1),&y,&lgErr);
2226  lgBoundErr = lgBoundErr || lgErr;
2227  L1 = exp(y)*gv.bin[nd]->cnv_H_pGR/EN1RYD;
2228 
2229  /* >>chng 03 nov 10, safeguard against underflow, PvH */
2230  if( uu1*p1*L1 < 1.e5*DBL_MIN )
2231  {
2232  *ErrorCode = MAX2(*ErrorCode,QH_WIDE_FAIL);
2233  return;
2234  }
2235 
2236  /* >>chng 04 feb 01, change 2.*NQMIN -> 1.2*NQMIN, PvH */
2237  help1 = pow(Thi/Tlo,1./(1.2*NQMIN));
2238  mul_fac = MIN2(QT_RATIO,help1);
2239 
2240  sum = 0.;
2241  RadCooling = 0.;
2242 
2243  do
2244  {
2245  double s0,s1,wid;
2246 
2247  T2 = T1*mul_fac;
2248  uu2 = ufunct(T2,nd,&lgErr);
2249  lgBoundErr = lgBoundErr || lgErr;
2250  help1 = log(uu2/Umax);
2251  p2 = c1*exp(-c2*POW2(help1));
2252  splint_safe(gv.dsttmp,gv.bin[nd]->dstems,gv.bin[nd]->dstslp2,NDEMS,log(T2),&y,&lgErr);
2253  lgBoundErr = lgBoundErr || lgErr;
2254  L2 = exp(y)*gv.bin[nd]->cnv_H_pGR/EN1RYD;
2255 
2256  wid = uu2 - uu1;
2257  s0 = log_integral(uu1,p1,uu2,p2);
2258  s1 = log_integral(uu1,p1*L1,uu2,p2*L2);
2259 
2260  qprob[nbin] = s0;
2261  Lambda[nbin] = s1/s0;
2262  xx = log(Lambda[nbin]*EN1RYD*gv.bin[nd]->cnv_GR_pH);
2263  splint_safe(gv.bin[nd]->dstems,gv.dsttmp,gv.bin[nd]->dstslp,NDEMS,xx,&y,&lgErr);
2264  lgBoundErr = lgBoundErr || lgErr;
2265  qtemp[nbin] = exp(y);
2266  delu[nbin] = wid;
2267  p[nbin] = qprob[nbin]/delu[nbin];
2268  dPdlnT[nbin] = p[nbin]*qtemp[nbin]*uderiv(qtemp[nbin],nd);
2269 
2270  sum += qprob[nbin];
2271  RadCooling += qprob[nbin]*Lambda[nbin];
2272 
2273  T1 = T2;
2274  uu1 = uu2;
2275  p1 = p2;
2276  L1 = L2;
2277 
2278  ++nbin;
2279 
2280  } while( T2 < Thi && nbin < NQGRID );
2281 
2282  fac = RadCooling*EN1RYD*gv.bin[nd]->cnv_GR_pCM3/gv.bin[nd]->GrainHeat;
2283 
2284  for( i=0; i < nbin; ++i )
2285  {
2286  qprob[i] /= fac;
2287  dPdlnT[i] /= fac;
2288  }
2289 
2290  *tol = sum/fac;
2291  *qnbin = nbin;
2292  *new_tmin = qtemp[0];
2293  *ErrorCode = MAX2(*ErrorCode,QH_ANALYTIC);
2294 
2295  /* do quality control */
2296  if( TolFac > STRICT )
2297  *ErrorCode = MAX2(*ErrorCode,QH_ANALYTIC_RELAX);
2298 
2299  if( lgBoundErr )
2300  *ErrorCode = MAX2(*ErrorCode,QH_THIGH_FAIL);
2301 
2302  if( fabs(sum/fac-1.) > PROB_TOL )
2303  *ErrorCode = MAX2(*ErrorCode,QH_CONV_FAIL);
2304 
2305  if( dPdlnT[0] > SAFETY*PROB_CUTOFF_LO || dPdlnT[nbin-1] > SAFETY*PROB_CUTOFF_HI )
2306  *ErrorCode = MAX2(*ErrorCode,QH_BOUND_FAIL);
2307 
2308  if( trace.lgTrace && trace.lgDustBug )
2309  {
2310  fprintf( ioQQQ, " GetProbDistr_HighLimit found tol1 %.4e tol2 %.4e\n",
2311  fabs(sum-1.), fabs(sum/fac-1.) );
2312  fprintf( ioQQQ, " zone %ld %s nbin %ld total prob %.4e new_tmin %.4e\n",
2313  nzone,gv.bin[nd]->chDstLab,nbin,sum/fac,*new_tmin );
2314  }
2315  return;
2316 }
2317 
2318 
2319 /* calculate derivative of the enthalpy function dU/dT (aka the specific heat) at a given temperature, in Ryd/K */
2320 STATIC double uderiv(double temp,
2321  long int nd)
2322 {
2323  enth_type ecase;
2324  long int i,
2325  j;
2326  double N_C,
2327  N_H;
2328  double deriv = 0.,
2329  hok[3] = {1275., 1670., 4359.},
2330  numer,
2331  dnumer,
2332  denom,
2333  ddenom,
2334  x;
2335 
2336 
2337  DEBUG_ENTRY( "uderiv()" );
2338 
2339  if( temp <= 0. )
2340  {
2341  fprintf( ioQQQ, " uderiv called with non-positive temperature: %.6e\n" , temp );
2342  cdEXIT(EXIT_FAILURE);
2343  }
2344  ASSERT( nd >= 0 && nd < gv.nBin );
2345 
2346  ecase = gv.which_enth[gv.bin[nd]->matType];
2347  switch( ecase )
2348  {
2349  case ENTH_CAR:
2350  numer = (4.15e-22/EN1RYD)*pow(temp,3.3);
2351  dnumer = (3.3*4.15e-22/EN1RYD)*pow(temp,2.3);
2352  denom = 1. + 6.51e-03*temp + 1.5e-06*temp*temp + 8.3e-07*pow(temp,2.3);
2353  ddenom = 6.51e-03 + 2.*1.5e-06*temp + 2.3*8.3e-07*pow(temp,1.3);
2354  /* dU/dT for pah/graphitic grains in Ryd/K, derived from:
2355  * >>refer grain physics Guhathakurta & Draine, 1989, ApJ, 345, 230 */
2356  deriv = (dnumer*denom - numer*ddenom)/POW2(denom);
2357  break;
2358  case ENTH_CAR2:
2359  /* dU/dT for graphite grains in Ryd/K, using eq 9 of */
2360  /* >>refer grain physics Draine B.T., and Li A., 2001, ApJ, 551, 807 */
2361  deriv = (DebyeDeriv(temp/863.,2) + 2.*DebyeDeriv(temp/2504.,2))*BOLTZMANN/EN1RYD;
2362  break;
2363  case ENTH_SIL:
2364  /* dU/dT for silicate grains (and grey grains) in Ryd/K */
2365  /* limits of tlim set above, 0 and DBL_MAX, so always OK */
2366  /* >>refer grain physics Guhathakurta & Draine, 1989, ApJ, 345, 230 */
2367  for( j = 0; j < 4; j++ )
2368  {
2369  if( temp > tlim[j] && temp <= tlim[j+1] )
2370  {
2371  deriv = cval[j]*pow(temp,ppower[j]);
2372  break;
2373  }
2374  }
2375  break;
2376  case ENTH_SIL2:
2377  /* dU/dT for silicate grains in Ryd/K, using eq 11 of */
2378  /* >>refer grain physics Draine B.T., and Li A., 2001, ApJ, 551, 807 */
2379  deriv = (2.*DebyeDeriv(temp/500.,2) + DebyeDeriv(temp/1500.,3))*BOLTZMANN/EN1RYD;
2380  break;
2381  case ENTH_PAH:
2382  /* dU/dT for PAH grains in Ryd/K, using eq A.4 of */
2383  /* >>refer grain physics Dwek E., Arendt R.G., Fixsen D.J. et al., 1997, ApJ, 475, 565 */
2384  * this expression is only valid up to 2000 K */
2385  x = log10(MIN2(temp,2000.));
2386  deriv = pow(10.,-21.26+3.1688*x-0.401894*POW2(x))/EN1RYD;
2387  break;
2388  case ENTH_PAH2:
2389  /* dU/dT for PAH grains in Ryd/K, approximately using eq 33 of */
2390  /* >>refer grain physics Draine B.T., and Li A., 2001, ApJ, 551, 807 */
2391  /* N_C and N_H should actually be nint(N_C) and nint(N_H),
2392  * but this can lead to FP overflow for very large grains */
2393  N_C = NO_ATOMS(nd);
2394  if( N_C <= 25. )
2395  {
2396  N_H = 0.5*N_C;
2397  }
2398  else if( N_C <= 100. )
2399  {
2400  N_H = 2.5*sqrt(N_C);
2401  }
2402  else
2403  {
2404  N_H = 0.25*N_C;
2405  }
2406  deriv = 0.;
2407  for( i=0; i < 3; i++ )
2408  {
2409  double help1 = hok[i]/temp;
2410  if( help1 < 300. )
2411  {
2412  double help2 = exp(help1);
2413  double help3 = ( help1 < 1.e-7 ) ? help1*(1.+0.5*help1) : help2-1.;
2414  deriv += N_H/(N_C-2.)*POW2(help1)*help2/POW2(help3)*BOLTZMANN/EN1RYD;
2415  }
2416  }
2417  deriv += (DebyeDeriv(temp/863.,2) + 2.*DebyeDeriv(temp/2504.,2))*BOLTZMANN/EN1RYD;
2418  break;
2419  default:
2420  fprintf( ioQQQ, " uderiv called with unknown type for enthalpy function: %d\n", ecase );
2421  cdEXIT(EXIT_FAILURE);
2422  }
2423 
2424  /* >>chng 00 mar 23, use formula 3.1 of Guhathakurta & Draine, by PvH */
2425  /* >>chng 03 jan 17, use MAX2(..,1) to prevent crash for extremely small grains, PvH */
2426  deriv *= MAX2(NO_ATOMS(nd)-2.,1.);
2427 
2428  if( deriv <= 0. )
2429  {
2430  fprintf( ioQQQ, " uderiv finds non-positive derivative: %.6e, what's up?\n" , deriv );
2431  cdEXIT(EXIT_FAILURE);
2432  }
2433  return( deriv );
2434 }
2435 
2436 
2437 /* calculate the enthalpy of a grain at a given temperature, in Ryd */
2438 STATIC double ufunct(double temp,
2439  long int nd,
2440  /*@out@*/ bool *lgBoundErr)
2441 {
2442  double enthalpy,
2443  y;
2444 
2445  DEBUG_ENTRY( "ufunct()" );
2446 
2447  if( temp <= 0. )
2448  {
2449  fprintf( ioQQQ, " ufunct called with non-positive temperature: %.6e\n" , temp );
2450  cdEXIT(EXIT_FAILURE);
2451  }
2452  ASSERT( nd >= 0 && nd < gv.nBin );
2453 
2454  /* >>chng 02 apr 22, interpolate in DustEnth array to get enthalpy, by PvH */
2455  splint_safe(gv.dsttmp,gv.bin[nd]->DustEnth,gv.bin[nd]->EnthSlp,NDEMS,log(temp),&y,lgBoundErr);
2456  enthalpy = exp(y);
2457 
2458  ASSERT( enthalpy > 0. );
2459  return( enthalpy );
2460 }
2461 
2462 
2463 /* this is the inverse of ufunct: determine grain temperature as a function of enthalpy */
2464 STATIC double inv_ufunct(double enthalpy,
2465  long int nd,
2466  /*@out@*/ bool *lgBoundErr)
2467 {
2468  double temp,
2469  y;
2470 
2471  DEBUG_ENTRY( "inv_ufunct()" );
2472 
2473  if( enthalpy <= 0. )
2474  {
2475  fprintf( ioQQQ, " inv_ufunct called with non-positive enthalpy: %.6e\n" , enthalpy );
2476  cdEXIT(EXIT_FAILURE);
2477  }
2478  ASSERT( nd >= 0 && nd < gv.nBin );
2479 
2480  /* >>chng 02 apr 22, interpolate in DustEnth array to get temperature, by PvH */
2481  splint_safe(gv.bin[nd]->DustEnth,gv.dsttmp,gv.bin[nd]->EnthSlp2,NDEMS,log(enthalpy),&y,lgBoundErr);
2482  temp = exp(y);
2483 
2484  ASSERT( temp > 0. );
2485  return( temp );
2486 }
2487 
2488 
2489 /* initialize interpolation arrays for grain enthalpy */
2490 void InitEnthalpy(void)
2491 {
2492  double C_V1,
2493  C_V2,
2494  tdust1,
2495  tdust2;
2496  long int i,
2497  j,
2498  nd;
2499 
2500  DEBUG_ENTRY( "InitEnthalpy()" );
2501 
2502  for( nd=0; nd < gv.nBin; nd++ )
2503  {
2504  tdust2 = GRAIN_TMIN;
2505  C_V2 = uderiv(tdust2,nd);
2506  /* at low temps, C_V = C*T^3 -> U = C*T^4/4 = C_V*T/4 */
2507  gv.bin[nd]->DustEnth[0] = C_V2*tdust2/4.;
2508  tdust1 = tdust2;
2509  C_V1 = C_V2;
2510 
2511  for( i=1; i < NDEMS; i++ )
2512  {
2513  double tmid,Cmid;
2514  tdust2 = exp(gv.dsttmp[i]);
2515  C_V2 = uderiv(tdust2,nd);
2516  tmid = sqrt(tdust1*tdust2);
2517  /* this ensures accuracy for silicate enthalpy */
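 /* the silicate specific heat is a piecewise power law with breaks at tlim[j];
  * log_integral is exact for a pure power law, so moving tmid onto a break point
  * that falls inside the interval keeps both sub-integrals accurate */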
2518  for( j=1; j < 4; j++ )
2519  {
2520  if( tdust1 < tlim[j] && tlim[j] < tdust2 )
2521  {
2522  tmid = tlim[j];
2523  break;
2524  }
2525  }
2526  Cmid = uderiv(tmid,nd);
2527  gv.bin[nd]->DustEnth[i] = gv.bin[nd]->DustEnth[i-1] +
2528  log_integral(tdust1,C_V1,tmid,Cmid) +
2529  log_integral(tmid,Cmid,tdust2,C_V2);
2530  tdust1 = tdust2;
2531  C_V1 = C_V2;
2532  }
2533  }
2534 
2535  /* conversion for logarithmic interpolation */
2536  for( nd=0; nd < gv.nBin; nd++ )
2537  {
2538  for( i=0; i < NDEMS; i++ )
2539  {
2540  gv.bin[nd]->DustEnth[i] = log(gv.bin[nd]->DustEnth[i]);
2541  }
2542  }
2543 
2544  /* now find slopes from spline fit */
2545  for( nd=0; nd < gv.nBin; nd++ )
2546  {
2547  /* set up coefficients for splint */
2548  spline(gv.dsttmp,gv.bin[nd]->DustEnth,NDEMS,2e31,2e31,gv.bin[nd]->EnthSlp);
2549  spline(gv.bin[nd]->DustEnth,gv.dsttmp,NDEMS,2e31,2e31,gv.bin[nd]->EnthSlp2);
2550  }
2551  return;
2552 }
2553 
2554 
2555 /* helper function for calculating specific heat, uses Debye theory */
2556 STATIC double DebyeDeriv(double x,
2557  long n)
2558 {
2559  long i,
2560  nn;
2561  double res,
2562  *xx,
2563  *rr,
2564  *aa,
2565  *ww;
2566 
2567  DEBUG_ENTRY( "DebyeDeriv()" );
2568 
2569  ASSERT( x > 0. );
2570  ASSERT( n == 2 || n == 3 );
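 /* this routine evaluates d/dx [ n*Int_0^1 dy y^n/(exp(y/x)-1) ], i.e.
  * n*Int_0^1 dy (y^(n+1)/x^2)*exp(y/x)/POW2(exp(y/x)-1), the temperature
  * derivative of the mean excitation of an n-dimensional Debye mode spectrum at
  * scaled temperature x = T/T_Debye; for x << 1 the integral may be evaluated
  * with the upper limit extended to infinity, giving Gamma(n+2)*zeta(n+1)*x^n
  * (the analytic branch below), otherwise Gauss-Legendre quadrature is used with
  * a number of nodes growing as 1/x so that the exponential cut-off of the
  * integrand near y ~ x remains resolved */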
2571 
2572  if( x < 0.001 )
2573  {
2574  /* for general n this is Gamma(n+2)*zeta(n+1)*powi(x,n) */
2575  if( n == 2 )
2576  {
2577  res = 6.*1.202056903159594*POW2(x);
2578  }
2579  else if( n == 3 )
2580  {
2581  res = 24.*1.082323233711138*POW3(x);
2582  }
2583  else
2584  /* added to keep lint happy - note that assert above confirmed that n is 2 or 3,
2585  * but lint flagged possible flow without defn of res */
2586  TotalInsanity();
2587  }
2588  else
2589  {
2590  nn = 4*MAX2(4,2*(long)(0.05/x));
2591  xx = (double *)MALLOC(sizeof(double)*(unsigned)nn);
2592  rr = (double *)MALLOC(sizeof(double)*(unsigned)nn);
2593  aa = (double *)MALLOC(sizeof(double)*(unsigned)nn);
2594  ww = (double *)MALLOC(sizeof(double)*(unsigned)nn);
2595  gauss_legendre(nn,xx,aa);
2596  gauss_init(nn,0.,1.,xx,aa,rr,ww);
2597 
2598  res = 0.;
2599  for( i=0; i < nn; i++ )
2600  {
2601  double help1 = rr[i]/x;
2602  if( help1 < 300. )
2603  {
2604  double help2 = exp(help1);
2605  double help3 = ( help1 < 1.e-7 ) ? help1*(1.+0.5*help1) : help2-1.;
2606  res += ww[i]*powi(rr[i],n+1)*help2/POW2(help3);
2607  }
2608  }
2609  res /= POW2(x);
2610 
2611  free(ww);
2612  free(aa);
2613  free(rr);
2614  free(xx);
2615  }
2616  return (double)n*res;
2617 }
