style: clean up ielr

* src/AnnotationList.c, src/ielr.c: Fix include order.
Prefer `res` to `result`.
Reduce scopes.
Drop the old 76-column limitation where honoring it clutters the
code too much.
Denest when possible (we're starving for horizontal width).
This commit is contained in:
Akim Demaille
2020-06-25 07:34:37 +02:00
parent 670c7e7a75
commit 3be228f64c
2 changed files with 179 additions and 186 deletions

View File

@@ -18,11 +18,13 @@
along with this program. If not, see <http://www.gnu.org/licenses/>. */
#include <config.h>
#include "system.h"
#include "AnnotationList.h"
#include "lalr.h"
#include "system.h"
#include "ielr.h"
#include "lalr.h"
/**
* \pre
@@ -38,15 +40,14 @@ static AnnotationList*
AnnotationList__alloc_on_obstack (ContributionIndex contribution_count,
struct obstack *annotations_obstackp)
{
AnnotationList *result;
size_t contributions_size =
contribution_count * sizeof result->contributions[0];
result = obstack_alloc (annotations_obstackp,
AnnotationList *res;
size_t contributions_size = contribution_count * sizeof res->contributions[0];
res = obstack_alloc (annotations_obstackp,
offsetof (AnnotationList, contributions)
+ contributions_size);
result->next = NULL;
result->inadequacyNode = NULL;
return result;
res->next = NULL;
res->inadequacyNode = NULL;
return res;
}
/**
@@ -102,13 +103,12 @@ AnnotationList__insertInto (AnnotationList *self, AnnotationList **list,
for (node = list; *node; node = &(*node)->next)
{
int cmp = 0;
ContributionIndex ci;
if (self->inadequacyNode->id < (*node)->inadequacyNode->id)
cmp = 1;
else if ((*node)->inadequacyNode->id < self->inadequacyNode->id)
cmp = -1;
else
for (ci = 0;
for (ContributionIndex ci = 0;
cmp == 0 && ci < self->inadequacyNode->contributionCount;
++ci)
{
@@ -120,9 +120,7 @@ AnnotationList__insertInto (AnnotationList *self, AnnotationList **list,
else if (AnnotationList__isContributionAlways (*node, ci))
cmp = 1;
else
{
size_t item;
for (item = 0; cmp == 0 && item < nitems; ++item)
for (size_t item = 0; cmp == 0 && item < nitems; ++item)
{
if (!Sbitset__test (self->contributions[ci], item))
{
@@ -133,7 +131,6 @@ AnnotationList__insertInto (AnnotationList *self, AnnotationList **list,
cmp = 1;
}
}
}
if (cmp < 0)
{
self->next = *node;
@@ -232,7 +229,7 @@ AnnotationList__computePredecessorAnnotations (
annotation_node->inadequacyNode = self->inadequacyNode;
bool potential_contribution = false;
bitset *lookaheads = NULL;
{
for (ContributionIndex ci = 0; ci < self->inadequacyNode->contributionCount; ++ci)
{
symbol_number contribution_token =
@@ -340,7 +337,6 @@ AnnotationList__computePredecessorAnnotations (
}
}
}
}
/* If the predecessor has any contributions besides just "always" and
"never" contributions:
@@ -425,11 +421,7 @@ AnnotationList__compute_from_inadequacies (
BITSET_FOR_EACH (biter_conflict, conflicted_tokens, conflicted_token, 0)
{
AnnotationList *annotation_node;
/* FIXME: Would a BITSET_FRUGAL or BITEST_SPARSE be more efficient? Now
or convert it inside InadequacyList__new_conflict? */
bitset actions = bitset_create (s->reductions->num + 1, BITSET_FIXED);
ContributionIndex contribution_count = 0;
bool potential_contribution = false;
/* Allocate the annotation node. */
{
@@ -444,6 +436,11 @@ AnnotationList__compute_from_inadequacies (
annotations_obstackp);
}
/* FIXME: Would a BITSET_FRUGAL or BITEST_SPARSE be more efficient? Now
or convert it inside InadequacyList__new_conflict? */
bitset actions = bitset_create (s->reductions->num + 1, BITSET_FIXED);
bool potential_contribution = false;
/* Add a contribution for each reduction that has conflicted_token as a
lookahead. */
{
@@ -464,11 +461,8 @@ AnnotationList__compute_from_inadequacies (
annotations_obstackp);
/* Catch item_i up to rule_i. This works because both are
sorted on rule number. */
while (!item_number_is_rule_number (
ritem[s->items[item_i]])
|| item_number_as_rule_number (
ritem[s->items[item_i]])
!= the_rule->number)
while (!item_number_is_rule_number (ritem[s->items[item_i]])
|| item_number_as_rule_number (ritem[s->items[item_i]]) != the_rule->number)
{
++item_i;
aver (item_i < s->nitems);
@@ -571,11 +565,9 @@ AnnotationList__debug (AnnotationList const *self, size_t nitems, int spaces)
AnnotationIndex ai;
for (a = self, ai = 0; a; a = a->next, ++ai)
{
for (int j = 0; j < spaces; ++j)
putc (' ', stderr);
fprintf (stderr, "Annotation %d (manifesting state %d):\n",
fprintf (stderr, "%*sAnnotation %d (manifesting state %d):\n",
spaces, "",
ai, a->inadequacyNode->manifestingState->number);
{
bitset_bindex rulei
= bitset_first (a->inadequacyNode->inadequacy.conflict.actions);
for (ContributionIndex ci = 0; ci < a->inadequacyNode->contributionCount; ++ci)
@@ -583,8 +575,7 @@ AnnotationList__debug (AnnotationList const *self, size_t nitems, int spaces)
symbol_number token =
InadequacyList__getContributionToken (a->inadequacyNode, ci)
->content->number;
for (int j = 0; j < spaces+2; ++j)
putc (' ', stderr);
fprintf (stderr, "%*s", spaces+2, "");
if (ci == InadequacyList__getShiftContributionIndex (
a->inadequacyNode))
fprintf (stderr, "Contributes shift of token %d.\n", token);
@@ -610,7 +601,6 @@ AnnotationList__debug (AnnotationList const *self, size_t nitems, int spaces)
}
}
}
}
void
AnnotationList__computeLookaheadFilter (AnnotationList const *self,
@@ -775,8 +765,7 @@ AnnotationList__computeDominantContribution (AnnotationList const *self,
/* R/R conflict, so the reduction with the lowest rule number dominates.
Fortunately, contributions are sorted by rule number. */
for (ContributionIndex ci = 0; ci < self->inadequacyNode->contributionCount; ++ci)
if (AnnotationList__stateMakesContribution (self, nitems, ci,
lookaheads))
if (AnnotationList__stateMakesContribution (self, nitems, ci, lookaheads))
{
if (require_split_stable
&& !AnnotationList__isContributionAlways (self, ci))

View File

@@ -18,10 +18,11 @@
along with this program. If not, see <http://www.gnu.org/licenses/>. */
#include <config.h>
#include "system.h"
#include "ielr.h"
#include "system.h"
#include <bitset.h>
#include <timevar.h>
@@ -318,7 +319,7 @@ static state ***
ielr_compute_predecessors (void)
{
int *predecessor_counts = xnmalloc (nstates, sizeof *predecessor_counts);
state ***result = xnmalloc (nstates, sizeof *result);
state ***res = xnmalloc (nstates, sizeof *res);
for (state_number i = 0; i < nstates; ++i)
predecessor_counts[i] = 0;
for (state_number i = 0; i < nstates; ++i)
@@ -326,18 +327,18 @@ ielr_compute_predecessors (void)
++predecessor_counts[states[i]->transitions->states[j]->number];
for (state_number i = 0; i < nstates; ++i)
{
result[i] = xnmalloc (predecessor_counts[i]+1, sizeof *result[i]);
result[i][predecessor_counts[i]] = NULL;
res[i] = xnmalloc (predecessor_counts[i]+1, sizeof *res[i]);
res[i][predecessor_counts[i]] = NULL;
predecessor_counts[i] = 0;
}
for (state_number i = 0; i < nstates; ++i)
for (int j = 0; j < states[i]->transitions->num; ++j)
{
state_number k = states[i]->transitions->states[j]->number;
result[k][predecessor_counts[k]++] = states[i];
res[k][predecessor_counts[k]++] = states[i];
}
free (predecessor_counts);
return result;
return res;
}
/**
@@ -708,16 +709,18 @@ ielr_compute_state (bitsetv follow_kernel_items, bitsetv always_follows,
/* Determine whether there's an isocore of t with which these lookaheads can
be merged. */
{
if (annotation_lists)
{
AnnotationIndex ai;
AnnotationList *a;
if (annotation_lists)
for (ai = 0, a = annotation_lists[lr0_isocore->state->number];
a;
++ai, a = a->next)
work1[ai] =
AnnotationList__computeDominantContribution (
a, lr0_isocore->state->nitems, lookaheads, false);
}
for (this_isocorep = &t->state_list;
this_isocorep == &t->state_list || *this_isocorep != t->state_list;
this_isocorep = &(*this_isocorep)->nextIsocore)
@@ -726,6 +729,8 @@ ielr_compute_state (bitsetv follow_kernel_items, bitsetv always_follows,
break;
if (annotation_lists)
{
AnnotationIndex ai;
AnnotationList *a;
for (ai = 0, a = annotation_lists[lr0_isocore->state->number];
a;
++ai, a = a->next)
@@ -789,15 +794,14 @@ ielr_compute_state (bitsetv follow_kernel_items, bitsetv always_follows,
actually new. */
if (has_lookaheads)
{
size_t i;
if (!(*this_isocorep)->lookaheads)
{
(*this_isocorep)->lookaheads =
xnmalloc (t->nitems, sizeof (*this_isocorep)->lookaheads);
for (i = 0; i < t->nitems; ++i)
for (size_t i = 0; i < t->nitems; ++i)
(*this_isocorep)->lookaheads[i] = NULL;
}
for (i = 0; i < t->nitems; ++i)
for (size_t i = 0; i < t->nitems; ++i)
if (!bitset_empty_p (lookaheads[i]))
{
if (!(*this_isocorep)->lookaheads[i])