In my spare time I like to program theorems.
const PI = 3.141592653589793; // Math.PI

/**
 * Approximates pi with the Leibniz series: 4/1 - 4/3 + 4/5 - 4/7 + ...
 *
 * The original version recursed via setTimeout and only stopped when
 * `total !== PI` became false — but floating-point partial sums never land
 * exactly on PI, so it never terminated. This version iterates up to a
 * finite bound and compares against PI with a tolerance instead.
 *
 * @param {number} total - running partial sum
 * @param {number} x - current odd denominator (1, 3, 5, ...)
 * @param {boolean} addSubBool - true when the current term is subtracted
 * @param {number} limit - stop once x exceeds this bound (the "limit as x
 *   approaches infinity" must be cut off somewhere in a real program)
 * @returns {number} the final partial sum (≈ pi)
 */
function CalculatePi(total = 0, x = 1, addSubBool = false, limit = 1e6) {
  let sign = addSubBool ? -1 : 1;
  for (; x <= limit; x += 2) {
    total += sign * (4 / x);
    sign = -sign;
    // Log progress periodically rather than on every term (the original
    // intended a tab here: `t` was a mangled `\t`).
    if ((x - 1) % 2e5 === 0) {
      console.log(`${total}\tDifference: ${Math.abs(PI - total)}`);
    }
    // Tolerance check: strict equality with PI is unreachable in floats.
    if (Math.abs(total - PI) < 1e-12) break;
  }
  return total;
}
CalculatePi();
This one is a recursive call to calculate pi. I based it off this link
My question is, how do I calculate a limit in programming? This call will go to infinity.
So how does a calculator or other programming languages calculate a limit as x approaches infinity? Would I set a maximum for x?
Below we use `loop` and `recur` to allow your function to recur indefinitely. And instead of using `setTimeout`, I'm recurring as fast as possible, but outputting a result only at every interval of 1000 `x` —
// Tag a set of arguments so `loop` knows to keep iterating.
const recur = (...values) => ({ recur, values });

// Trampoline: call f repeatedly, feeding back the arguments packed by
// `recur`, until f returns an ordinary (non-recur) value.
const loop = (f) => {
  let result = f();
  while (result && result.recur === recur) {
    result = f(...result.values);
  }
  return result;
};

// Leibniz-series pi: iterates until x passes `limit` (or, in principle,
// the total lands exactly on Math.PI), printing progress every 1000 x.
const calculatePi = (limit = Infinity) =>
  loop((total = 0, x = 1, addSubBool = false) => {
    if (x > limit) return total;                               // stop condition
    if (x % 1e3 === 1) console.log(x, total, total - Math.PI); // progress
    if (total === Math.PI) return total;                       // exact hit (unlikely)
    const term = addSubBool ? -4 / x : 4 / x;
    return recur(total + term, x + 2, !addSubBool);
  });
console.log(calculatePi(1e7))
As you can see, this method takes a long time to converge on the answer. Even after ten million (10M) x, we have still only calculated 6 digits of precision —
x total diff
...
9997001 3.1415924535297624 -2.0006003076389334e-7
9998001 3.1415924535497695 -2.0004002365681117e-7
9999001 3.141592453569776 -2.0002001699381822e-7
A different approach takes `precision` as an input of `calculatePi`. Instead of limiting by some arbitrary `x`, we continue computing until the specified precision is reached. For demo purposes, this function also returns `x`, so we can see how big `x` had to get before the desired precision was reached —
// Compute pi until the running total matches Math.PI when both are
// truncated at `precision` (e.g. 1e5 → five decimal places).
// Returns [ total, x ] so the caller can see how far the series ran.
const calculatePi = (precision = 1e5) =>
  loop((total = 0, x = 1, addSubBool = false) => {
    // `>> 0` truncates toward zero, so only the leading digits compare
    if (total * precision >> 0 === Math.PI * precision >> 0) {
      return [total, x];
    }
    const term = addSubBool ? -4 / x : 4 / x;
    return recur(total + term, x + 2, !addSubBool);
  });
As you can see, `x` goes beyond 37 million to reach 7 decimals of precision —
// One log call keeps all five results on a single console line.
const atTwoPlaces = calculatePi(1e2);   // [ 3.14999586659347, 239 ]
const atThreePlaces = calculatePi(1e3); // [ 3.141000236580159, 3377 ]
const atFourPlaces = calculatePi(1e4);  // [ 3.1415000095284658, 21589 ]
const atFivePlaces = calculatePi(1e5);  // [ 3.141599999994786, 272243 ]
const atSevenPlaces = calculatePi(1e7); // [ 3.1415926000000005, 37320609 ]
console.log(atTwoPlaces, atThreePlaces, atFourPlaces, atFivePlaces, atSevenPlaces);
Expand the snippet below to verify the results in your browser -
// Tag a set of arguments so `loop` knows to keep iterating.
const recur = (...values) => ({ recur, values });

// Trampoline: call f repeatedly, feeding back the arguments packed by
// `recur`, until f returns an ordinary (non-recur) value.
const loop = (f) => {
  let result = f();
  while (result && result.recur === recur) {
    result = f(...result.values);
  }
  return result;
};

// Leibniz-series pi, stopping once the total agrees with Math.PI after
// truncation at `precision`. Returns [ total, x ] to show the cost.
const calculatePi = (precision = 1e5) =>
  loop((total = 0, x = 1, addSubBool = false) => {
    // `>> 0` truncates toward zero, so only the leading digits compare
    if (total * precision >> 0 === Math.PI * precision >> 0) {
      return [total, x];
    }
    const term = addSubBool ? -4 / x : 4 / x;
    return recur(total + term, x + 2, !addSubBool);
  });
// One log call keeps all five results on a single console line.
const atTwoPlaces = calculatePi(1e2);   // [ 3.14999586659347, 239 ]
const atThreePlaces = calculatePi(1e3); // [ 3.141000236580159, 3377 ]
const atFourPlaces = calculatePi(1e4);  // [ 3.1415000095284658, 21589 ]
const atFivePlaces = calculatePi(1e5);  // [ 3.141599999994786, 272243 ]
const atSevenPlaces = calculatePi(1e7); // [ 3.1415926000000005, 37320609 ]
console.log(atTwoPlaces, atThreePlaces, atFourPlaces, atFivePlaces, atSevenPlaces);
Finally, it doesn't make much sense to check against `Math.PI` when calculating pi; I imagine the whole goal is to calculate a number we pretend not to know. To do that, we start with some `guess` and then measure the difference between it and the `total`. If the guess is within the specified tolerance, return the guess —
// Compute pi without referencing Math.PI: carry the previous total as a
// `guess` and stop once successive totals agree to within 1/precision.
// Returns [ guess, x ] to show how far the series had to run.
const calculatePi = (precision = 1e5) =>
  loop((guess = 1, total = 0, x = 1, addSubBool = false) => {
    if (Math.abs(guess - total) * precision < 1) {
      return [guess, x];
    }
    const term = addSubBool ? -4 / x : 4 / x;
    // current total becomes the next guess
    return recur(total, total + term, x + 2, !addSubBool);
  });
We can see it works as intended. Admittedly, I'm surprised by the correlation between the input precision and the `x` necessary to compute it —
// One log call keeps all five results on a single console line.
const atTwoPlaces = calculatePi(1e2);   // [ 3.136592684838816, 403 ]
const atThreePlaces = calculatePi(1e3); // [ 3.1410926536210413, 4003 ]
const atFourPlaces = calculatePi(1e4);  // [ 3.1415426535898248, 40003 ]
const atFivePlaces = calculatePi(1e5);  // [ 3.1415876535897618, 400003 ]
const atSevenPlaces = calculatePi(1e7); // [ 3.141592603589817, 40000003 ]
console.log(atTwoPlaces, atThreePlaces, atFourPlaces, atFivePlaces, atSevenPlaces);
Expand the snippet below to verify the results in your browser -
// Tag a set of arguments so `loop` knows to keep iterating.
const recur = (...values) => ({ recur, values });

// Trampoline: call f repeatedly, feeding back the arguments packed by
// `recur`, until f returns an ordinary (non-recur) value.
const loop = (f) => {
  let result = f();
  while (result && result.recur === recur) {
    result = f(...result.values);
  }
  return result;
};

// Compute pi without referencing Math.PI: carry the previous total as a
// `guess` and stop once successive totals agree to within 1/precision.
// Returns [ guess, x ] to show how far the series had to run.
const calculatePi = (precision = 1e5) =>
  loop((guess = 1, total = 0, x = 1, addSubBool = false) => {
    if (Math.abs(guess - total) * precision < 1) {
      return [guess, x];
    }
    const term = addSubBool ? -4 / x : 4 / x;
    // current total becomes the next guess
    return recur(total, total + term, x + 2, !addSubBool);
  });
// One log call keeps all five results on a single console line.
const atTwoPlaces = calculatePi(1e2);   // [ 3.136592684838816, 403 ]
const atThreePlaces = calculatePi(1e3); // [ 3.1410926536210413, 4003 ]
const atFourPlaces = calculatePi(1e4);  // [ 3.1415426535898248, 40003 ]
const atFivePlaces = calculatePi(1e5);  // [ 3.1415876535897618, 400003 ]
const atSevenPlaces = calculatePi(1e7); // [ 3.141592603589817, 40000003 ]
console.log(atTwoPlaces, atThreePlaces, atFourPlaces, atFivePlaces, atSevenPlaces);
Personally, I would define a suitable tolerance and then compare the difference between the current and last values. If the difference falls below the tolerance stop computing and you know your result is accurate to plus or minus your tolerance.
You could also keep computing until you get two values the same as that likely means you have reached the limit of precision for the datatype you're storing the result in and any further calculation is meaningless.